[llvm-branch-commits] [llvm] 3183add - [RISCV] Define the remaining vector fixed-point arithmetic intrinsics.

Monk Chiang via llvm-branch-commits llvm-branch-commits at lists.llvm.org
Sun Dec 20 23:05:39 PST 2020


Author: Monk Chiang
Date: 2020-12-20T22:57:07-08:00
New Revision: 3183add5343e58329c58e2164e015b15fd2cb1d5

URL: https://github.com/llvm/llvm-project/commit/3183add5343e58329c58e2164e015b15fd2cb1d5
DIFF: https://github.com/llvm/llvm-project/commit/3183add5343e58329c58e2164e015b15fd2cb1d5.diff

LOG: [RISCV] Define the remaining vector fixed-point arithmetic intrinsics.

  This patch builds on D93366 and defines the remaining vector fixed-point arithmetic intrinsics:
    1. vaaddu/vaadd/vasubu/vasub
    2. vsmul
    3. vssrl/vssra
    4. vnclipu/vnclip

  We worked with @rogfer01 from BSC to produce this patch.

  Authored-by: Roger Ferrer Ibanez <rofirrim at gmail.com>
  Co-Authored-by: ShihPo Hung <shihpo.hung at sifive.com>

  Differential Revision: https://reviews.llvm.org/D93508
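
  For reference, a minimal IR usage sketch (adapted from the vaadd-rv32.ll test
  added below; the @example wrapper function is illustrative, not part of the
  patch) shows how the new intrinsics are called, with the trailing operand
  carrying the vector length:

    declare <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8(
      <vscale x 1 x i8>, <vscale x 1 x i8>, i32)

    define <vscale x 1 x i8> @example(<vscale x 1 x i8> %x, <vscale x 1 x i8> %y, i32 %vl) nounwind {
      ; Under -mattr=+experimental-v this selects to vsetvli followed by vaadd.vv,
      ; as checked in the added tests.
      %r = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8(
             <vscale x 1 x i8> %x, <vscale x 1 x i8> %y, i32 %vl)
      ret <vscale x 1 x i8> %r
    }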

Added: 
    llvm/test/CodeGen/RISCV/rvv/vaadd-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vaadd-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vaaddu-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vaaddu-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vasub-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vasub-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vasubu-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vasubu-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vnclip-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vnclip-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vnclipu-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vnclipu-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vsmul-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vsmul-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vssra-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vssra-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vssrl-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vssrl-rv64.ll

Modified: 
    llvm/include/llvm/IR/IntrinsicsRISCV.td
    llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
    llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
    llvm/lib/Target/RISCV/RISCVRegisterInfo.td

Removed: 
    


################################################################################
diff  --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
index 4f3b65fbd033..055d6baa5b8d 100644
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -274,6 +274,25 @@ let TargetPrefix = "riscv" in {
                     [IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic {
     let ExtendOperand = 3;
   }
+  // For Saturating binary operations.
+  // The destination vector type is NOT the same as the first source vector.
+  // Input: (vector_in, vector_in/scalar_in, vl)
+  class RISCVSaturatingBinaryABXNoMask
+        : Intrinsic<[llvm_anyvector_ty],
+                    [llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty],
+                    [IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic {
+    let ExtendOperand = 2;
+  }
+  // For Saturating binary operations with mask.
+  // The destination vector type is NOT the same as the first source vector (with mask).
+  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl)
+  class RISCVSaturatingBinaryABXMask
+        : Intrinsic<[llvm_anyvector_ty],
+                    [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
+                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
+                    [IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic {
+    let ExtendOperand = 3;
+  }
 
   // For vmv.v.v, vmv.v.x, vmv.v.i
   // Input: (vector_in/scalar_in, vl)
@@ -339,6 +358,10 @@ let TargetPrefix = "riscv" in {
     def "int_riscv_" # NAME : RISCVSaturatingBinaryAAXNoMask;
     def "int_riscv_" # NAME # "_mask" : RISCVSaturatingBinaryAAXMask;
   }
+  multiclass RISCVSaturatingBinaryABX {
+    def "int_riscv_" # NAME : RISCVSaturatingBinaryABXNoMask;
+    def "int_riscv_" # NAME # "_mask" : RISCVSaturatingBinaryABXMask;
+  }
   multiclass RISCVTernaryAAAX {
     def "int_riscv_" # NAME : RISCVTernaryAAAXNoMask;
     def "int_riscv_" # NAME # "_mask" : RISCVTernaryAAAXMask;
@@ -446,4 +469,17 @@ let TargetPrefix = "riscv" in {
 
   defm vslideup : RISCVTernaryAAAX;
   defm vslidedown : RISCVTernaryAAAX;
+
+  defm vaaddu : RISCVSaturatingBinaryAAX;
+  defm vaadd : RISCVSaturatingBinaryAAX;
+  defm vasubu : RISCVSaturatingBinaryAAX;
+  defm vasub : RISCVSaturatingBinaryAAX;
+
+  defm vsmul : RISCVSaturatingBinaryAAX;
+
+  defm vssrl : RISCVSaturatingBinaryAAX;
+  defm vssra : RISCVSaturatingBinaryAAX;
+
+  defm vnclipu : RISCVSaturatingBinaryABX;
+  defm vnclip : RISCVSaturatingBinaryABX;
 } // TargetPrefix = "riscv"

diff  --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index ded287ff1268..050b17c440f5 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -1658,6 +1658,39 @@ let Defs = [VXSAT], hasSideEffects = 1 in {
   defm PseudoVSSUB       : VPseudoBinaryV_VV_VX;
 }
 
+//===----------------------------------------------------------------------===//
+// 13.2. Vector Single-Width Averaging Add and Subtract
+//===----------------------------------------------------------------------===//
+let Uses = [VL, VTYPE, VXRM], hasSideEffects = 1 in {
+  defm PseudoVAADDU      : VPseudoBinaryV_VV_VX;
+  defm PseudoVAADD       : VPseudoBinaryV_VV_VX;
+  defm PseudoVASUBU      : VPseudoBinaryV_VV_VX;
+  defm PseudoVASUB       : VPseudoBinaryV_VV_VX;
+}
+
+//===----------------------------------------------------------------------===//
+// 13.3. Vector Single-Width Fractional Multiply with Rounding and Saturation
+//===----------------------------------------------------------------------===//
+let Uses = [VL, VTYPE, VXRM], Defs = [VXSAT], hasSideEffects = 1 in {
+  defm PseudoVSMUL      : VPseudoBinaryV_VV_VX;
+}
+
+//===----------------------------------------------------------------------===//
+// 13.4. Vector Single-Width Scaling Shift Instructions
+//===----------------------------------------------------------------------===//
+let Uses = [VL, VTYPE, VXRM], hasSideEffects = 1 in {
+  defm PseudoVSSRL        : VPseudoBinaryV_VV_VX_VI<uimm5>;
+  defm PseudoVSSRA        : VPseudoBinaryV_VV_VX_VI<uimm5>;
+}
+
+//===----------------------------------------------------------------------===//
+// 13.5. Vector Narrowing Fixed-Point Clip Instructions
+//===----------------------------------------------------------------------===//
+let Uses = [VL, VTYPE, VXRM], Defs = [VXSAT], hasSideEffects = 1 in {
+  defm PseudoVNCLIP     : VPseudoBinaryV_WV_WX_WI;
+  defm PseudoVNCLIPU    : VPseudoBinaryV_WV_WX_WI;
+}
+
 } // Predicates = [HasStdExtV]
 
 let Predicates = [HasStdExtV, HasStdExtF] in {
@@ -1955,6 +1988,33 @@ defm "" : VPatBinaryV_VV_VX_VI<"int_riscv_vsadd", "PseudoVSADD", AllIntegerVecto
 defm "" : VPatBinaryV_VV_VX<"int_riscv_vssubu", "PseudoVSSUBU", AllIntegerVectors>;
 defm "" : VPatBinaryV_VV_VX<"int_riscv_vssub", "PseudoVSSUB", AllIntegerVectors>;
 
+//===----------------------------------------------------------------------===//
+// 13.2. Vector Single-Width Averaging Add and Subtract
+//===----------------------------------------------------------------------===//
+defm "" : VPatBinaryV_VV_VX<"int_riscv_vaaddu", "PseudoVAADDU", AllIntegerVectors>;
+defm "" : VPatBinaryV_VV_VX<"int_riscv_vaadd", "PseudoVAADD", AllIntegerVectors>;
+defm "" : VPatBinaryV_VV_VX<"int_riscv_vasubu", "PseudoVASUBU", AllIntegerVectors>;
+defm "" : VPatBinaryV_VV_VX<"int_riscv_vasub", "PseudoVASUB", AllIntegerVectors>;
+
+//===----------------------------------------------------------------------===//
+// 13.3. Vector Single-Width Fractional Multiply with Rounding and Saturation
+//===----------------------------------------------------------------------===//
+defm "" : VPatBinaryV_VV_VX<"int_riscv_vsmul", "PseudoVSMUL", AllIntegerVectors>;
+
+//===----------------------------------------------------------------------===//
+// 13.4. Vector Single-Width Scaling Shift Instructions
+//===----------------------------------------------------------------------===//
+defm "" : VPatBinaryV_VV_VX_VI<"int_riscv_vssrl", "PseudoVSSRL", AllIntegerVectors,
+                               uimm5>;
+defm "" : VPatBinaryV_VV_VX_VI<"int_riscv_vssra", "PseudoVSSRA", AllIntegerVectors,
+                               uimm5>;
+
+//===----------------------------------------------------------------------===//
+// 13.5. Vector Narrowing Fixed-Point Clip Instructions
+//===----------------------------------------------------------------------===//
+defm "" : VPatBinaryV_WV_WX_WI<"int_riscv_vnclipu", "PseudoVNCLIPU", AllWidenableIntVectors>;
+defm "" : VPatBinaryV_WV_WX_WI<"int_riscv_vnclip", "PseudoVNCLIP", AllWidenableIntVectors>;
+
 } // Predicates = [HasStdExtV]
 
 let Predicates = [HasStdExtV, HasStdExtF] in {

diff  --git a/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp b/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
index 69abf8402011..631077ef83f5 100644
--- a/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
@@ -99,6 +99,7 @@ BitVector RISCVRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
   markSuperRegs(Reserved, RISCV::VL);
   markSuperRegs(Reserved, RISCV::VTYPE);
   markSuperRegs(Reserved, RISCV::VXSAT);
+  markSuperRegs(Reserved, RISCV::VXRM);
 
   assert(checkAllSuperRegsMarked(Reserved));
   return Reserved;

diff  --git a/llvm/lib/Target/RISCV/RISCVRegisterInfo.td b/llvm/lib/Target/RISCV/RISCVRegisterInfo.td
index 442cb2e4b0b8..fdac1eeb4fe4 100644
--- a/llvm/lib/Target/RISCV/RISCVRegisterInfo.td
+++ b/llvm/lib/Target/RISCV/RISCVRegisterInfo.td
@@ -380,6 +380,7 @@ let RegAltNameIndices = [ABIRegAltName] in {
   def VTYPE  : RISCVReg<0, "vtype", ["vtype"]>;
   def VL     : RISCVReg<0, "vl", ["vl"]>;
   def VXSAT  : RISCVReg<0, "vxsat", ["vxsat"]>;
+  def VXRM   : RISCVReg<0, "vxrm", ["vxrm"]>;
 }
 
 class RegisterTypes<list<ValueType> reg_types> {

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vaadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vaadd-rv32.ll
new file mode 100644
index 000000000000..a100fc08b696
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vaadd-rv32.ll
@@ -0,0 +1,1441 @@
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  i32);
+
+define <vscale x 1 x i8> @intrinsic_vaadd_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_vv_nxv1i8_nxv1i8_nxv1i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vaadd.mask.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i8> @intrinsic_vaadd_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv1i8_nxv1i8_nxv1i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i8> @llvm.riscv.vaadd.mask.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    <vscale x 1 x i8> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vaadd.nxv2i8.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  i32);
+
+define <vscale x 2 x i8> @intrinsic_vaadd_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_vv_nxv2i8_nxv2i8_nxv2i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i8> @llvm.riscv.vaadd.nxv2i8.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vaadd.mask.nxv2i8.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i8> @intrinsic_vaadd_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv2i8_nxv2i8_nxv2i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i8> @llvm.riscv.vaadd.mask.nxv2i8.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    <vscale x 2 x i8> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vaadd.nxv4i8.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  i32);
+
+define <vscale x 4 x i8> @intrinsic_vaadd_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_vv_nxv4i8_nxv4i8_nxv4i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i8> @llvm.riscv.vaadd.nxv4i8.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vaadd.mask.nxv4i8.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i8> @intrinsic_vaadd_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv4i8_nxv4i8_nxv4i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i8> @llvm.riscv.vaadd.mask.nxv4i8.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    <vscale x 4 x i8> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vaadd.nxv8i8.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  i32);
+
+define <vscale x 8 x i8> @intrinsic_vaadd_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_vv_nxv8i8_nxv8i8_nxv8i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i8> @llvm.riscv.vaadd.nxv8i8.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vaadd.mask.nxv8i8.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i8> @intrinsic_vaadd_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv8i8_nxv8i8_nxv8i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i8> @llvm.riscv.vaadd.mask.nxv8i8.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    <vscale x 8 x i8> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vaadd.nxv16i8.nxv16i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  i32);
+
+define <vscale x 16 x i8> @intrinsic_vaadd_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_vv_nxv16i8_nxv16i8_nxv16i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i8> @llvm.riscv.vaadd.nxv16i8.nxv16i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vaadd.mask.nxv16i8.nxv16i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i8> @intrinsic_vaadd_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv16i8_nxv16i8_nxv16i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i8> @llvm.riscv.vaadd.mask.nxv16i8.nxv16i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    <vscale x 16 x i8> %2,
+    <vscale x 16 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vaadd.nxv32i8.nxv32i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  i32);
+
+define <vscale x 32 x i8> @intrinsic_vaadd_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_vv_nxv32i8_nxv32i8_nxv32i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 32 x i8> @llvm.riscv.vaadd.nxv32i8.nxv32i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vaadd.mask.nxv32i8.nxv32i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  <vscale x 32 x i1>,
+  i32);
+
+define <vscale x 32 x i8> @intrinsic_vaadd_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv32i8_nxv32i8_nxv32i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 32 x i8> @llvm.riscv.vaadd.mask.nxv32i8.nxv32i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8> %1,
+    <vscale x 32 x i8> %2,
+    <vscale x 32 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vaadd.nxv64i8.nxv64i8(
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  i32);
+
+define <vscale x 64 x i8> @intrinsic_vaadd_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_vv_nxv64i8_nxv64i8_nxv64i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 64 x i8> @llvm.riscv.vaadd.nxv64i8.nxv64i8(
+    <vscale x 64 x i8> %0,
+    <vscale x 64 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vaadd.mask.nxv64i8.nxv64i8(
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  <vscale x 64 x i1>,
+  i32);
+
+define <vscale x 64 x i8> @intrinsic_vaadd_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv64i8_nxv64i8_nxv64i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 64 x i8> @llvm.riscv.vaadd.mask.nxv64i8.nxv64i8(
+    <vscale x 64 x i8> %0,
+    <vscale x 64 x i8> %1,
+    <vscale x 64 x i8> %2,
+    <vscale x 64 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vaadd.nxv1i16.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  i32);
+
+define <vscale x 1 x i16> @intrinsic_vaadd_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_vv_nxv1i16_nxv1i16_nxv1i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i16> @llvm.riscv.vaadd.nxv1i16.nxv1i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    i32 %2)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vaadd.mask.nxv1i16.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i16> @intrinsic_vaadd_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv1i16_nxv1i16_nxv1i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i16> @llvm.riscv.vaadd.mask.nxv1i16.nxv1i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    <vscale x 1 x i16> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vaadd.nxv2i16.nxv2i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  i32);
+
+define <vscale x 2 x i16> @intrinsic_vaadd_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_vv_nxv2i16_nxv2i16_nxv2i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i16> @llvm.riscv.vaadd.nxv2i16.nxv2i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %1,
+    i32 %2)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vaadd.mask.nxv2i16.nxv2i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i16> @intrinsic_vaadd_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv2i16_nxv2i16_nxv2i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i16> @llvm.riscv.vaadd.mask.nxv2i16.nxv2i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %1,
+    <vscale x 2 x i16> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vaadd.nxv4i16.nxv4i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  i32);
+
+define <vscale x 4 x i16> @intrinsic_vaadd_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_vv_nxv4i16_nxv4i16_nxv4i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i16> @llvm.riscv.vaadd.nxv4i16.nxv4i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %1,
+    i32 %2)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vaadd.mask.nxv4i16.nxv4i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i16> @intrinsic_vaadd_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv4i16_nxv4i16_nxv4i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i16> @llvm.riscv.vaadd.mask.nxv4i16.nxv4i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %1,
+    <vscale x 4 x i16> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vaadd.nxv8i16.nxv8i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  i32);
+
+define <vscale x 8 x i16> @intrinsic_vaadd_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_vv_nxv8i16_nxv8i16_nxv8i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i16> @llvm.riscv.vaadd.nxv8i16.nxv8i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %1,
+    i32 %2)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vaadd.mask.nxv8i16.nxv8i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i16> @intrinsic_vaadd_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv8i16_nxv8i16_nxv8i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i16> @llvm.riscv.vaadd.mask.nxv8i16.nxv8i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %1,
+    <vscale x 8 x i16> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vaadd.nxv16i16.nxv16i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  i32);
+
+define <vscale x 16 x i16> @intrinsic_vaadd_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_vv_nxv16i16_nxv16i16_nxv16i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i16> @llvm.riscv.vaadd.nxv16i16.nxv16i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %1,
+    i32 %2)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vaadd.mask.nxv16i16.nxv16i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i16> @intrinsic_vaadd_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv16i16_nxv16i16_nxv16i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i16> @llvm.riscv.vaadd.mask.nxv16i16.nxv16i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %1,
+    <vscale x 16 x i16> %2,
+    <vscale x 16 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vaadd.nxv32i16.nxv32i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  i32);
+
+define <vscale x 32 x i16> @intrinsic_vaadd_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_vv_nxv32i16_nxv32i16_nxv32i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 32 x i16> @llvm.riscv.vaadd.nxv32i16.nxv32i16(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16> %1,
+    i32 %2)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vaadd.mask.nxv32i16.nxv32i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  <vscale x 32 x i1>,
+  i32);
+
+define <vscale x 32 x i16> @intrinsic_vaadd_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv32i16_nxv32i16_nxv32i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 32 x i16> @llvm.riscv.vaadd.mask.nxv32i16.nxv32i16(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16> %1,
+    <vscale x 32 x i16> %2,
+    <vscale x 32 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vaadd.nxv1i32.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  i32);
+
+define <vscale x 1 x i32> @intrinsic_vaadd_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_vv_nxv1i32_nxv1i32_nxv1i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i32> @llvm.riscv.vaadd.nxv1i32.nxv1i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    i32 %2)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vaadd.mask.nxv1i32.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i32> @intrinsic_vaadd_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv1i32_nxv1i32_nxv1i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i32> @llvm.riscv.vaadd.mask.nxv1i32.nxv1i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    <vscale x 1 x i32> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vaadd.nxv2i32.nxv2i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  i32);
+
+define <vscale x 2 x i32> @intrinsic_vaadd_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_vv_nxv2i32_nxv2i32_nxv2i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i32> @llvm.riscv.vaadd.nxv2i32.nxv2i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    i32 %2)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vaadd.mask.nxv2i32.nxv2i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i32> @intrinsic_vaadd_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv2i32_nxv2i32_nxv2i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i32> @llvm.riscv.vaadd.mask.nxv2i32.nxv2i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    <vscale x 2 x i32> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vaadd.nxv4i32.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  i32);
+
+define <vscale x 4 x i32> @intrinsic_vaadd_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_vv_nxv4i32_nxv4i32_nxv4i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i32> @llvm.riscv.vaadd.nxv4i32.nxv4i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    i32 %2)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vaadd.mask.nxv4i32.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i32> @intrinsic_vaadd_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv4i32_nxv4i32_nxv4i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i32> @llvm.riscv.vaadd.mask.nxv4i32.nxv4i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    <vscale x 4 x i32> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vaadd.nxv8i32.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  i32);
+
+define <vscale x 8 x i32> @intrinsic_vaadd_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_vv_nxv8i32_nxv8i32_nxv8i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i32> @llvm.riscv.vaadd.nxv8i32.nxv8i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    i32 %2)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vaadd.mask.nxv8i32.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i32> @intrinsic_vaadd_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv8i32_nxv8i32_nxv8i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i32> @llvm.riscv.vaadd.mask.nxv8i32.nxv8i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    <vscale x 8 x i32> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vaadd.nxv16i32.nxv16i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  i32);
+
+define <vscale x 16 x i32> @intrinsic_vaadd_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_vv_nxv16i32_nxv16i32_nxv16i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i32> @llvm.riscv.vaadd.nxv16i32.nxv16i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    i32 %2)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vaadd.mask.nxv16i32.nxv16i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i32> @intrinsic_vaadd_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv16i32_nxv16i32_nxv16i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i32> @llvm.riscv.vaadd.mask.nxv16i32.nxv16i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    <vscale x 16 x i32> %2,
+    <vscale x 16 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.i8(
+  <vscale x 1 x i8>,
+  i8,
+  i32);
+
+define <vscale x 1 x i8> @intrinsic_vaadd_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_vx_nxv1i8_nxv1i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.i8(
+    <vscale x 1 x i8> %0,
+    i8 %1,
+    i32 %2)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vaadd.mask.nxv1i8.i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  i8,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i8> @intrinsic_vaadd_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv1i8_nxv1i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i8> @llvm.riscv.vaadd.mask.nxv1i8.i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    i8 %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vaadd.nxv2i8.i8(
+  <vscale x 2 x i8>,
+  i8,
+  i32);
+
+define <vscale x 2 x i8> @intrinsic_vaadd_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_vx_nxv2i8_nxv2i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 2 x i8> @llvm.riscv.vaadd.nxv2i8.i8(
+    <vscale x 2 x i8> %0,
+    i8 %1,
+    i32 %2)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vaadd.mask.nxv2i8.i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  i8,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i8> @intrinsic_vaadd_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv2i8_nxv2i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i8> @llvm.riscv.vaadd.mask.nxv2i8.i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    i8 %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vaadd.nxv4i8.i8(
+  <vscale x 4 x i8>,
+  i8,
+  i32);
+
+define <vscale x 4 x i8> @intrinsic_vaadd_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_vx_nxv4i8_nxv4i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 4 x i8> @llvm.riscv.vaadd.nxv4i8.i8(
+    <vscale x 4 x i8> %0,
+    i8 %1,
+    i32 %2)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vaadd.mask.nxv4i8.i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  i8,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i8> @intrinsic_vaadd_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv4i8_nxv4i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i8> @llvm.riscv.vaadd.mask.nxv4i8.i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    i8 %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vaadd.nxv8i8.i8(
+  <vscale x 8 x i8>,
+  i8,
+  i32);
+
+define <vscale x 8 x i8> @intrinsic_vaadd_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_vx_nxv8i8_nxv8i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 8 x i8> @llvm.riscv.vaadd.nxv8i8.i8(
+    <vscale x 8 x i8> %0,
+    i8 %1,
+    i32 %2)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vaadd.mask.nxv8i8.i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  i8,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i8> @intrinsic_vaadd_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv8i8_nxv8i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i8> @llvm.riscv.vaadd.mask.nxv8i8.i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    i8 %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vaadd.nxv16i8.i8(
+  <vscale x 16 x i8>,
+  i8,
+  i32);
+
+define <vscale x 16 x i8> @intrinsic_vaadd_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_vx_nxv16i8_nxv16i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 16 x i8> @llvm.riscv.vaadd.nxv16i8.i8(
+    <vscale x 16 x i8> %0,
+    i8 %1,
+    i32 %2)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vaadd.mask.nxv16i8.i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  i8,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i8> @intrinsic_vaadd_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv16i8_nxv16i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i8> @llvm.riscv.vaadd.mask.nxv16i8.i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    i8 %2,
+    <vscale x 16 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vaadd.nxv32i8.i8(
+  <vscale x 32 x i8>,
+  i8,
+  i32);
+
+define <vscale x 32 x i8> @intrinsic_vaadd_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_vx_nxv32i8_nxv32i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 32 x i8> @llvm.riscv.vaadd.nxv32i8.i8(
+    <vscale x 32 x i8> %0,
+    i8 %1,
+    i32 %2)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vaadd.mask.nxv32i8.i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  i8,
+  <vscale x 32 x i1>,
+  i32);
+
+define <vscale x 32 x i8> @intrinsic_vaadd_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv32i8_nxv32i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 32 x i8> @llvm.riscv.vaadd.mask.nxv32i8.i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8> %1,
+    i8 %2,
+    <vscale x 32 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vaadd.nxv64i8.i8(
+  <vscale x 64 x i8>,
+  i8,
+  i32);
+
+define <vscale x 64 x i8> @intrinsic_vaadd_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_vx_nxv64i8_nxv64i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 64 x i8> @llvm.riscv.vaadd.nxv64i8.i8(
+    <vscale x 64 x i8> %0,
+    i8 %1,
+    i32 %2)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vaadd.mask.nxv64i8.i8(
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  i8,
+  <vscale x 64 x i1>,
+  i32);
+
+define <vscale x 64 x i8> @intrinsic_vaadd_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv64i8_nxv64i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 64 x i8> @llvm.riscv.vaadd.mask.nxv64i8.i8(
+    <vscale x 64 x i8> %0,
+    <vscale x 64 x i8> %1,
+    i8 %2,
+    <vscale x 64 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vaadd.nxv1i16.i16(
+  <vscale x 1 x i16>,
+  i16,
+  i32);
+
+define <vscale x 1 x i16> @intrinsic_vaadd_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_vx_nxv1i16_nxv1i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 1 x i16> @llvm.riscv.vaadd.nxv1i16.i16(
+    <vscale x 1 x i16> %0,
+    i16 %1,
+    i32 %2)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vaadd.mask.nxv1i16.i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  i16,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i16> @intrinsic_vaadd_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv1i16_nxv1i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i16> @llvm.riscv.vaadd.mask.nxv1i16.i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    i16 %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vaadd.nxv2i16.i16(
+  <vscale x 2 x i16>,
+  i16,
+  i32);
+
+define <vscale x 2 x i16> @intrinsic_vaadd_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_vx_nxv2i16_nxv2i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 2 x i16> @llvm.riscv.vaadd.nxv2i16.i16(
+    <vscale x 2 x i16> %0,
+    i16 %1,
+    i32 %2)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vaadd.mask.nxv2i16.i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  i16,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i16> @intrinsic_vaadd_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv2i16_nxv2i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i16> @llvm.riscv.vaadd.mask.nxv2i16.i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %1,
+    i16 %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vaadd.nxv4i16.i16(
+  <vscale x 4 x i16>,
+  i16,
+  i32);
+
+define <vscale x 4 x i16> @intrinsic_vaadd_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_vx_nxv4i16_nxv4i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 4 x i16> @llvm.riscv.vaadd.nxv4i16.i16(
+    <vscale x 4 x i16> %0,
+    i16 %1,
+    i32 %2)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vaadd.mask.nxv4i16.i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  i16,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i16> @intrinsic_vaadd_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv4i16_nxv4i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i16> @llvm.riscv.vaadd.mask.nxv4i16.i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %1,
+    i16 %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vaadd.nxv8i16.i16(
+  <vscale x 8 x i16>,
+  i16,
+  i32);
+
+define <vscale x 8 x i16> @intrinsic_vaadd_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_vx_nxv8i16_nxv8i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 8 x i16> @llvm.riscv.vaadd.nxv8i16.i16(
+    <vscale x 8 x i16> %0,
+    i16 %1,
+    i32 %2)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vaadd.mask.nxv8i16.i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  i16,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i16> @intrinsic_vaadd_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv8i16_nxv8i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i16> @llvm.riscv.vaadd.mask.nxv8i16.i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %1,
+    i16 %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vaadd.nxv16i16.i16(
+  <vscale x 16 x i16>,
+  i16,
+  i32);
+
+define <vscale x 16 x i16> @intrinsic_vaadd_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_vx_nxv16i16_nxv16i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 16 x i16> @llvm.riscv.vaadd.nxv16i16.i16(
+    <vscale x 16 x i16> %0,
+    i16 %1,
+    i32 %2)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vaadd.mask.nxv16i16.i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  i16,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i16> @intrinsic_vaadd_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv16i16_nxv16i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i16> @llvm.riscv.vaadd.mask.nxv16i16.i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %1,
+    i16 %2,
+    <vscale x 16 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vaadd.nxv32i16.i16(
+  <vscale x 32 x i16>,
+  i16,
+  i32);
+
+define <vscale x 32 x i16> @intrinsic_vaadd_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_vx_nxv32i16_nxv32i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 32 x i16> @llvm.riscv.vaadd.nxv32i16.i16(
+    <vscale x 32 x i16> %0,
+    i16 %1,
+    i32 %2)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vaadd.mask.nxv32i16.i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  i16,
+  <vscale x 32 x i1>,
+  i32);
+
+define <vscale x 32 x i16> @intrinsic_vaadd_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv32i16_nxv32i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 32 x i16> @llvm.riscv.vaadd.mask.nxv32i16.i16(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16> %1,
+    i16 %2,
+    <vscale x 32 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vaadd.nxv1i32.i32(
+  <vscale x 1 x i32>,
+  i32,
+  i32);
+
+define <vscale x 1 x i32> @intrinsic_vaadd_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_vx_nxv1i32_nxv1i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 1 x i32> @llvm.riscv.vaadd.nxv1i32.i32(
+    <vscale x 1 x i32> %0,
+    i32 %1,
+    i32 %2)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vaadd.mask.nxv1i32.i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  i32,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i32> @intrinsic_vaadd_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv1i32_nxv1i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i32> @llvm.riscv.vaadd.mask.nxv1i32.i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    i32 %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vaadd.nxv2i32.i32(
+  <vscale x 2 x i32>,
+  i32,
+  i32);
+
+define <vscale x 2 x i32> @intrinsic_vaadd_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_vx_nxv2i32_nxv2i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 2 x i32> @llvm.riscv.vaadd.nxv2i32.i32(
+    <vscale x 2 x i32> %0,
+    i32 %1,
+    i32 %2)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vaadd.mask.nxv2i32.i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  i32,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i32> @intrinsic_vaadd_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv2i32_nxv2i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i32> @llvm.riscv.vaadd.mask.nxv2i32.i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    i32 %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vaadd.nxv4i32.i32(
+  <vscale x 4 x i32>,
+  i32,
+  i32);
+
+define <vscale x 4 x i32> @intrinsic_vaadd_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_vx_nxv4i32_nxv4i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 4 x i32> @llvm.riscv.vaadd.nxv4i32.i32(
+    <vscale x 4 x i32> %0,
+    i32 %1,
+    i32 %2)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vaadd.mask.nxv4i32.i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  i32,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i32> @intrinsic_vaadd_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv4i32_nxv4i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i32> @llvm.riscv.vaadd.mask.nxv4i32.i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    i32 %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vaadd.nxv8i32.i32(
+  <vscale x 8 x i32>,
+  i32,
+  i32);
+
+define <vscale x 8 x i32> @intrinsic_vaadd_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_vx_nxv8i32_nxv8i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 8 x i32> @llvm.riscv.vaadd.nxv8i32.i32(
+    <vscale x 8 x i32> %0,
+    i32 %1,
+    i32 %2)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vaadd.mask.nxv8i32.i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  i32,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i32> @intrinsic_vaadd_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv8i32_nxv8i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i32> @llvm.riscv.vaadd.mask.nxv8i32.i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    i32 %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vaadd.nxv16i32.i32(
+  <vscale x 16 x i32>,
+  i32,
+  i32);
+
+define <vscale x 16 x i32> @intrinsic_vaadd_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_vx_nxv16i32_nxv16i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 16 x i32> @llvm.riscv.vaadd.nxv16i32.i32(
+    <vscale x 16 x i32> %0,
+    i32 %1,
+    i32 %2)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vaadd.mask.nxv16i32.i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  i32,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i32> @intrinsic_vaadd_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv16i32_nxv16i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i32> @llvm.riscv.vaadd.mask.nxv16i32.i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    i32 %2,
+    <vscale x 16 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 16 x i32> %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vaadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vaadd-rv64.ll
new file mode 100644
index 000000000000..727adb8a4232
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vaadd-rv64.ll
@@ -0,0 +1,1761 @@
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  i64);
+
+define <vscale x 1 x i8> @intrinsic_vaadd_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_vv_nxv1i8_nxv1i8_nxv1i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vaadd.mask.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i8> @intrinsic_vaadd_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv1i8_nxv1i8_nxv1i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i8> @llvm.riscv.vaadd.mask.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    <vscale x 1 x i8> %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vaadd.nxv2i8.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  i64);
+
+define <vscale x 2 x i8> @intrinsic_vaadd_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_vv_nxv2i8_nxv2i8_nxv2i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i8> @llvm.riscv.vaadd.nxv2i8.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vaadd.mask.nxv2i8.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i8> @intrinsic_vaadd_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv2i8_nxv2i8_nxv2i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i8> @llvm.riscv.vaadd.mask.nxv2i8.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    <vscale x 2 x i8> %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vaadd.nxv4i8.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  i64);
+
+define <vscale x 4 x i8> @intrinsic_vaadd_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_vv_nxv4i8_nxv4i8_nxv4i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i8> @llvm.riscv.vaadd.nxv4i8.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vaadd.mask.nxv4i8.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i8> @intrinsic_vaadd_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv4i8_nxv4i8_nxv4i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i8> @llvm.riscv.vaadd.mask.nxv4i8.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    <vscale x 4 x i8> %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vaadd.nxv8i8.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  i64);
+
+define <vscale x 8 x i8> @intrinsic_vaadd_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_vv_nxv8i8_nxv8i8_nxv8i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i8> @llvm.riscv.vaadd.nxv8i8.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vaadd.mask.nxv8i8.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i8> @intrinsic_vaadd_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv8i8_nxv8i8_nxv8i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i8> @llvm.riscv.vaadd.mask.nxv8i8.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    <vscale x 8 x i8> %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vaadd.nxv16i8.nxv16i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  i64);
+
+define <vscale x 16 x i8> @intrinsic_vaadd_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_vv_nxv16i8_nxv16i8_nxv16i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i8> @llvm.riscv.vaadd.nxv16i8.nxv16i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vaadd.mask.nxv16i8.nxv16i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i8> @intrinsic_vaadd_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv16i8_nxv16i8_nxv16i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i8> @llvm.riscv.vaadd.mask.nxv16i8.nxv16i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    <vscale x 16 x i8> %2,
+    <vscale x 16 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vaadd.nxv32i8.nxv32i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  i64);
+
+define <vscale x 32 x i8> @intrinsic_vaadd_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_vv_nxv32i8_nxv32i8_nxv32i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 32 x i8> @llvm.riscv.vaadd.nxv32i8.nxv32i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vaadd.mask.nxv32i8.nxv32i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  <vscale x 32 x i1>,
+  i64);
+
+define <vscale x 32 x i8> @intrinsic_vaadd_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv32i8_nxv32i8_nxv32i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 32 x i8> @llvm.riscv.vaadd.mask.nxv32i8.nxv32i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8> %1,
+    <vscale x 32 x i8> %2,
+    <vscale x 32 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vaadd.nxv64i8.nxv64i8(
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  i64);
+
+define <vscale x 64 x i8> @intrinsic_vaadd_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_vv_nxv64i8_nxv64i8_nxv64i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 64 x i8> @llvm.riscv.vaadd.nxv64i8.nxv64i8(
+    <vscale x 64 x i8> %0,
+    <vscale x 64 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vaadd.mask.nxv64i8.nxv64i8(
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  <vscale x 64 x i1>,
+  i64);
+
+define <vscale x 64 x i8> @intrinsic_vaadd_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv64i8_nxv64i8_nxv64i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 64 x i8> @llvm.riscv.vaadd.mask.nxv64i8.nxv64i8(
+    <vscale x 64 x i8> %0,
+    <vscale x 64 x i8> %1,
+    <vscale x 64 x i8> %2,
+    <vscale x 64 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vaadd.nxv1i16.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  i64);
+
+define <vscale x 1 x i16> @intrinsic_vaadd_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_vv_nxv1i16_nxv1i16_nxv1i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i16> @llvm.riscv.vaadd.nxv1i16.nxv1i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    i64 %2)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vaadd.mask.nxv1i16.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i16> @intrinsic_vaadd_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv1i16_nxv1i16_nxv1i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i16> @llvm.riscv.vaadd.mask.nxv1i16.nxv1i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    <vscale x 1 x i16> %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vaadd.nxv2i16.nxv2i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  i64);
+
+define <vscale x 2 x i16> @intrinsic_vaadd_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_vv_nxv2i16_nxv2i16_nxv2i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i16> @llvm.riscv.vaadd.nxv2i16.nxv2i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %1,
+    i64 %2)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vaadd.mask.nxv2i16.nxv2i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i16> @intrinsic_vaadd_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv2i16_nxv2i16_nxv2i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i16> @llvm.riscv.vaadd.mask.nxv2i16.nxv2i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %1,
+    <vscale x 2 x i16> %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vaadd.nxv4i16.nxv4i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  i64);
+
+define <vscale x 4 x i16> @intrinsic_vaadd_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_vv_nxv4i16_nxv4i16_nxv4i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i16> @llvm.riscv.vaadd.nxv4i16.nxv4i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %1,
+    i64 %2)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vaadd.mask.nxv4i16.nxv4i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i16> @intrinsic_vaadd_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv4i16_nxv4i16_nxv4i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i16> @llvm.riscv.vaadd.mask.nxv4i16.nxv4i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %1,
+    <vscale x 4 x i16> %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vaadd.nxv8i16.nxv8i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  i64);
+
+define <vscale x 8 x i16> @intrinsic_vaadd_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_vv_nxv8i16_nxv8i16_nxv8i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i16> @llvm.riscv.vaadd.nxv8i16.nxv8i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %1,
+    i64 %2)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vaadd.mask.nxv8i16.nxv8i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i16> @intrinsic_vaadd_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv8i16_nxv8i16_nxv8i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i16> @llvm.riscv.vaadd.mask.nxv8i16.nxv8i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %1,
+    <vscale x 8 x i16> %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vaadd.nxv16i16.nxv16i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  i64);
+
+define <vscale x 16 x i16> @intrinsic_vaadd_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_vv_nxv16i16_nxv16i16_nxv16i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i16> @llvm.riscv.vaadd.nxv16i16.nxv16i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %1,
+    i64 %2)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vaadd.mask.nxv16i16.nxv16i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i16> @intrinsic_vaadd_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv16i16_nxv16i16_nxv16i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i16> @llvm.riscv.vaadd.mask.nxv16i16.nxv16i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %1,
+    <vscale x 16 x i16> %2,
+    <vscale x 16 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vaadd.nxv32i16.nxv32i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  i64);
+
+define <vscale x 32 x i16> @intrinsic_vaadd_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_vv_nxv32i16_nxv32i16_nxv32i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 32 x i16> @llvm.riscv.vaadd.nxv32i16.nxv32i16(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16> %1,
+    i64 %2)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vaadd.mask.nxv32i16.nxv32i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  <vscale x 32 x i1>,
+  i64);
+
+define <vscale x 32 x i16> @intrinsic_vaadd_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv32i16_nxv32i16_nxv32i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 32 x i16> @llvm.riscv.vaadd.mask.nxv32i16.nxv32i16(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16> %1,
+    <vscale x 32 x i16> %2,
+    <vscale x 32 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vaadd.nxv1i32.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  i64);
+
+define <vscale x 1 x i32> @intrinsic_vaadd_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_vv_nxv1i32_nxv1i32_nxv1i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i32> @llvm.riscv.vaadd.nxv1i32.nxv1i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    i64 %2)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vaadd.mask.nxv1i32.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i32> @intrinsic_vaadd_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv1i32_nxv1i32_nxv1i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i32> @llvm.riscv.vaadd.mask.nxv1i32.nxv1i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    <vscale x 1 x i32> %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vaadd.nxv2i32.nxv2i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  i64);
+
+define <vscale x 2 x i32> @intrinsic_vaadd_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_vv_nxv2i32_nxv2i32_nxv2i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i32> @llvm.riscv.vaadd.nxv2i32.nxv2i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    i64 %2)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vaadd.mask.nxv2i32.nxv2i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i32> @intrinsic_vaadd_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv2i32_nxv2i32_nxv2i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i32> @llvm.riscv.vaadd.mask.nxv2i32.nxv2i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    <vscale x 2 x i32> %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vaadd.nxv4i32.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  i64);
+
+define <vscale x 4 x i32> @intrinsic_vaadd_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_vv_nxv4i32_nxv4i32_nxv4i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i32> @llvm.riscv.vaadd.nxv4i32.nxv4i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    i64 %2)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vaadd.mask.nxv4i32.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i32> @intrinsic_vaadd_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv4i32_nxv4i32_nxv4i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i32> @llvm.riscv.vaadd.mask.nxv4i32.nxv4i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    <vscale x 4 x i32> %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vaadd.nxv8i32.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  i64);
+
+define <vscale x 8 x i32> @intrinsic_vaadd_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_vv_nxv8i32_nxv8i32_nxv8i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i32> @llvm.riscv.vaadd.nxv8i32.nxv8i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    i64 %2)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vaadd.mask.nxv8i32.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i32> @intrinsic_vaadd_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv8i32_nxv8i32_nxv8i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i32> @llvm.riscv.vaadd.mask.nxv8i32.nxv8i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    <vscale x 8 x i32> %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vaadd.nxv16i32.nxv16i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  i64);
+
+define <vscale x 16 x i32> @intrinsic_vaadd_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_vv_nxv16i32_nxv16i32_nxv16i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i32> @llvm.riscv.vaadd.nxv16i32.nxv16i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    i64 %2)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vaadd.mask.nxv16i32.nxv16i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i32> @intrinsic_vaadd_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv16i32_nxv16i32_nxv16i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i32> @llvm.riscv.vaadd.mask.nxv16i32.nxv16i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    <vscale x 16 x i32> %2,
+    <vscale x 16 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vaadd.nxv1i64.nxv1i64(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>,
+  i64);
+
+define <vscale x 1 x i64> @intrinsic_vaadd_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_vv_nxv1i64_nxv1i64_nxv1i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i64> @llvm.riscv.vaadd.nxv1i64.nxv1i64(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64> %1,
+    i64 %2)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vaadd.mask.nxv1i64.nxv1i64(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i64> @intrinsic_vaadd_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv1i64_nxv1i64_nxv1i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i64> @llvm.riscv.vaadd.mask.nxv1i64.nxv1i64(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64> %1,
+    <vscale x 1 x i64> %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vaadd.nxv2i64.nxv2i64(
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>,
+  i64);
+
+define <vscale x 2 x i64> @intrinsic_vaadd_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_vv_nxv2i64_nxv2i64_nxv2i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i64> @llvm.riscv.vaadd.nxv2i64.nxv2i64(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64> %1,
+    i64 %2)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vaadd.mask.nxv2i64.nxv2i64(
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i64> @intrinsic_vaadd_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv2i64_nxv2i64_nxv2i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i64> @llvm.riscv.vaadd.mask.nxv2i64.nxv2i64(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64> %1,
+    <vscale x 2 x i64> %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vaadd.nxv4i64.nxv4i64(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  i64);
+
+define <vscale x 4 x i64> @intrinsic_vaadd_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_vv_nxv4i64_nxv4i64_nxv4i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i64> @llvm.riscv.vaadd.nxv4i64.nxv4i64(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64> %1,
+    i64 %2)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vaadd.mask.nxv4i64.nxv4i64(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i64> @intrinsic_vaadd_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv4i64_nxv4i64_nxv4i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i64> @llvm.riscv.vaadd.mask.nxv4i64.nxv4i64(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64> %1,
+    <vscale x 4 x i64> %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vaadd.nxv8i64.nxv8i64(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>,
+  i64);
+
+define <vscale x 8 x i64> @intrinsic_vaadd_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_vv_nxv8i64_nxv8i64_nxv8i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i64> @llvm.riscv.vaadd.nxv8i64.nxv8i64(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i64> %1,
+    i64 %2)
+
+  ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vaadd.mask.nxv8i64.nxv8i64(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i64> @intrinsic_vaadd_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv8i64_nxv8i64_nxv8i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vaadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i64> @llvm.riscv.vaadd.mask.nxv8i64.nxv8i64(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i64> %1,
+    <vscale x 8 x i64> %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.i8(
+  <vscale x 1 x i8>,
+  i8,
+  i64);
+
+define <vscale x 1 x i8> @intrinsic_vaadd_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_vx_nxv1i8_nxv1i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.i8(
+    <vscale x 1 x i8> %0,
+    i8 %1,
+    i64 %2)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vaadd.mask.nxv1i8.i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  i8,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i8> @intrinsic_vaadd_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv1i8_nxv1i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i8> @llvm.riscv.vaadd.mask.nxv1i8.i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    i8 %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vaadd.nxv2i8.i8(
+  <vscale x 2 x i8>,
+  i8,
+  i64);
+
+define <vscale x 2 x i8> @intrinsic_vaadd_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_vx_nxv2i8_nxv2i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 2 x i8> @llvm.riscv.vaadd.nxv2i8.i8(
+    <vscale x 2 x i8> %0,
+    i8 %1,
+    i64 %2)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vaadd.mask.nxv2i8.i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  i8,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i8> @intrinsic_vaadd_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv2i8_nxv2i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i8> @llvm.riscv.vaadd.mask.nxv2i8.i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    i8 %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vaadd.nxv4i8.i8(
+  <vscale x 4 x i8>,
+  i8,
+  i64);
+
+define <vscale x 4 x i8> @intrinsic_vaadd_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_vx_nxv4i8_nxv4i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 4 x i8> @llvm.riscv.vaadd.nxv4i8.i8(
+    <vscale x 4 x i8> %0,
+    i8 %1,
+    i64 %2)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vaadd.mask.nxv4i8.i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  i8,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i8> @intrinsic_vaadd_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv4i8_nxv4i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i8> @llvm.riscv.vaadd.mask.nxv4i8.i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    i8 %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vaadd.nxv8i8.i8(
+  <vscale x 8 x i8>,
+  i8,
+  i64);
+
+define <vscale x 8 x i8> @intrinsic_vaadd_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_vx_nxv8i8_nxv8i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 8 x i8> @llvm.riscv.vaadd.nxv8i8.i8(
+    <vscale x 8 x i8> %0,
+    i8 %1,
+    i64 %2)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vaadd.mask.nxv8i8.i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  i8,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i8> @intrinsic_vaadd_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv8i8_nxv8i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i8> @llvm.riscv.vaadd.mask.nxv8i8.i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    i8 %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vaadd.nxv16i8.i8(
+  <vscale x 16 x i8>,
+  i8,
+  i64);
+
+define <vscale x 16 x i8> @intrinsic_vaadd_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_vx_nxv16i8_nxv16i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 16 x i8> @llvm.riscv.vaadd.nxv16i8.i8(
+    <vscale x 16 x i8> %0,
+    i8 %1,
+    i64 %2)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vaadd.mask.nxv16i8.i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  i8,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i8> @intrinsic_vaadd_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv16i8_nxv16i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i8> @llvm.riscv.vaadd.mask.nxv16i8.i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    i8 %2,
+    <vscale x 16 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vaadd.nxv32i8.i8(
+  <vscale x 32 x i8>,
+  i8,
+  i64);
+
+define <vscale x 32 x i8> @intrinsic_vaadd_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_vx_nxv32i8_nxv32i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 32 x i8> @llvm.riscv.vaadd.nxv32i8.i8(
+    <vscale x 32 x i8> %0,
+    i8 %1,
+    i64 %2)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vaadd.mask.nxv32i8.i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  i8,
+  <vscale x 32 x i1>,
+  i64);
+
+define <vscale x 32 x i8> @intrinsic_vaadd_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv32i8_nxv32i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 32 x i8> @llvm.riscv.vaadd.mask.nxv32i8.i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8> %1,
+    i8 %2,
+    <vscale x 32 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vaadd.nxv64i8.i8(
+  <vscale x 64 x i8>,
+  i8,
+  i64);
+
+define <vscale x 64 x i8> @intrinsic_vaadd_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_vx_nxv64i8_nxv64i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 64 x i8> @llvm.riscv.vaadd.nxv64i8.i8(
+    <vscale x 64 x i8> %0,
+    i8 %1,
+    i64 %2)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vaadd.mask.nxv64i8.i8(
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  i8,
+  <vscale x 64 x i1>,
+  i64);
+
+define <vscale x 64 x i8> @intrinsic_vaadd_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv64i8_nxv64i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 64 x i8> @llvm.riscv.vaadd.mask.nxv64i8.i8(
+    <vscale x 64 x i8> %0,
+    <vscale x 64 x i8> %1,
+    i8 %2,
+    <vscale x 64 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vaadd.nxv1i16.i16(
+  <vscale x 1 x i16>,
+  i16,
+  i64);
+
+define <vscale x 1 x i16> @intrinsic_vaadd_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_vx_nxv1i16_nxv1i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 1 x i16> @llvm.riscv.vaadd.nxv1i16.i16(
+    <vscale x 1 x i16> %0,
+    i16 %1,
+    i64 %2)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vaadd.mask.nxv1i16.i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  i16,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i16> @intrinsic_vaadd_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv1i16_nxv1i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i16> @llvm.riscv.vaadd.mask.nxv1i16.i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    i16 %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vaadd.nxv2i16.i16(
+  <vscale x 2 x i16>,
+  i16,
+  i64);
+
+define <vscale x 2 x i16> @intrinsic_vaadd_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_vx_nxv2i16_nxv2i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 2 x i16> @llvm.riscv.vaadd.nxv2i16.i16(
+    <vscale x 2 x i16> %0,
+    i16 %1,
+    i64 %2)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vaadd.mask.nxv2i16.i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  i16,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i16> @intrinsic_vaadd_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv2i16_nxv2i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i16> @llvm.riscv.vaadd.mask.nxv2i16.i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %1,
+    i16 %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vaadd.nxv4i16.i16(
+  <vscale x 4 x i16>,
+  i16,
+  i64);
+
+define <vscale x 4 x i16> @intrinsic_vaadd_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_vx_nxv4i16_nxv4i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 4 x i16> @llvm.riscv.vaadd.nxv4i16.i16(
+    <vscale x 4 x i16> %0,
+    i16 %1,
+    i64 %2)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vaadd.mask.nxv4i16.i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  i16,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i16> @intrinsic_vaadd_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv4i16_nxv4i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i16> @llvm.riscv.vaadd.mask.nxv4i16.i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %1,
+    i16 %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vaadd.nxv8i16.i16(
+  <vscale x 8 x i16>,
+  i16,
+  i64);
+
+define <vscale x 8 x i16> @intrinsic_vaadd_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_vx_nxv8i16_nxv8i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 8 x i16> @llvm.riscv.vaadd.nxv8i16.i16(
+    <vscale x 8 x i16> %0,
+    i16 %1,
+    i64 %2)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vaadd.mask.nxv8i16.i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  i16,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i16> @intrinsic_vaadd_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv8i16_nxv8i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i16> @llvm.riscv.vaadd.mask.nxv8i16.i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %1,
+    i16 %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vaadd.nxv16i16.i16(
+  <vscale x 16 x i16>,
+  i16,
+  i64);
+
+define <vscale x 16 x i16> @intrinsic_vaadd_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_vx_nxv16i16_nxv16i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 16 x i16> @llvm.riscv.vaadd.nxv16i16.i16(
+    <vscale x 16 x i16> %0,
+    i16 %1,
+    i64 %2)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vaadd.mask.nxv16i16.i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  i16,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i16> @intrinsic_vaadd_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv16i16_nxv16i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i16> @llvm.riscv.vaadd.mask.nxv16i16.i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %1,
+    i16 %2,
+    <vscale x 16 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vaadd.nxv32i16.i16(
+  <vscale x 32 x i16>,
+  i16,
+  i64);
+
+define <vscale x 32 x i16> @intrinsic_vaadd_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_vx_nxv32i16_nxv32i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 32 x i16> @llvm.riscv.vaadd.nxv32i16.i16(
+    <vscale x 32 x i16> %0,
+    i16 %1,
+    i64 %2)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vaadd.mask.nxv32i16.i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  i16,
+  <vscale x 32 x i1>,
+  i64);
+
+define <vscale x 32 x i16> @intrinsic_vaadd_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv32i16_nxv32i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 32 x i16> @llvm.riscv.vaadd.mask.nxv32i16.i16(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16> %1,
+    i16 %2,
+    <vscale x 32 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vaadd.nxv1i32.i32(
+  <vscale x 1 x i32>,
+  i32,
+  i64);
+
+define <vscale x 1 x i32> @intrinsic_vaadd_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_vx_nxv1i32_nxv1i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 1 x i32> @llvm.riscv.vaadd.nxv1i32.i32(
+    <vscale x 1 x i32> %0,
+    i32 %1,
+    i64 %2)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vaadd.mask.nxv1i32.i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  i32,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i32> @intrinsic_vaadd_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv1i32_nxv1i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i32> @llvm.riscv.vaadd.mask.nxv1i32.i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    i32 %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vaadd.nxv2i32.i32(
+  <vscale x 2 x i32>,
+  i32,
+  i64);
+
+define <vscale x 2 x i32> @intrinsic_vaadd_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_vx_nxv2i32_nxv2i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 2 x i32> @llvm.riscv.vaadd.nxv2i32.i32(
+    <vscale x 2 x i32> %0,
+    i32 %1,
+    i64 %2)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vaadd.mask.nxv2i32.i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  i32,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i32> @intrinsic_vaadd_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv2i32_nxv2i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i32> @llvm.riscv.vaadd.mask.nxv2i32.i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    i32 %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vaadd.nxv4i32.i32(
+  <vscale x 4 x i32>,
+  i32,
+  i64);
+
+define <vscale x 4 x i32> @intrinsic_vaadd_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_vx_nxv4i32_nxv4i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 4 x i32> @llvm.riscv.vaadd.nxv4i32.i32(
+    <vscale x 4 x i32> %0,
+    i32 %1,
+    i64 %2)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vaadd.mask.nxv4i32.i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  i32,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i32> @intrinsic_vaadd_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv4i32_nxv4i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i32> @llvm.riscv.vaadd.mask.nxv4i32.i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    i32 %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vaadd.nxv8i32.i32(
+  <vscale x 8 x i32>,
+  i32,
+  i64);
+
+define <vscale x 8 x i32> @intrinsic_vaadd_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_vx_nxv8i32_nxv8i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 8 x i32> @llvm.riscv.vaadd.nxv8i32.i32(
+    <vscale x 8 x i32> %0,
+    i32 %1,
+    i64 %2)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vaadd.mask.nxv8i32.i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  i32,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i32> @intrinsic_vaadd_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv8i32_nxv8i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i32> @llvm.riscv.vaadd.mask.nxv8i32.i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    i32 %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vaadd.nxv16i32.i32(
+  <vscale x 16 x i32>,
+  i32,
+  i64);
+
+define <vscale x 16 x i32> @intrinsic_vaadd_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_vx_nxv16i32_nxv16i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 16 x i32> @llvm.riscv.vaadd.nxv16i32.i32(
+    <vscale x 16 x i32> %0,
+    i32 %1,
+    i64 %2)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vaadd.mask.nxv16i32.i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  i32,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i32> @intrinsic_vaadd_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv16i32_nxv16i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i32> @llvm.riscv.vaadd.mask.nxv16i32.i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    i32 %2,
+    <vscale x 16 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vaadd.nxv1i64.i64(
+  <vscale x 1 x i64>,
+  i64,
+  i64);
+
+define <vscale x 1 x i64> @intrinsic_vaadd_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_vx_nxv1i64_nxv1i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 1 x i64> @llvm.riscv.vaadd.nxv1i64.i64(
+    <vscale x 1 x i64> %0,
+    i64 %1,
+    i64 %2)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vaadd.mask.nxv1i64.i64(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>,
+  i64,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i64> @intrinsic_vaadd_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv1i64_nxv1i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i64> @llvm.riscv.vaadd.mask.nxv1i64.i64(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64> %1,
+    i64 %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vaadd.nxv2i64.i64(
+  <vscale x 2 x i64>,
+  i64,
+  i64);
+
+define <vscale x 2 x i64> @intrinsic_vaadd_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_vx_nxv2i64_nxv2i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 2 x i64> @llvm.riscv.vaadd.nxv2i64.i64(
+    <vscale x 2 x i64> %0,
+    i64 %1,
+    i64 %2)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vaadd.mask.nxv2i64.i64(
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>,
+  i64,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i64> @intrinsic_vaadd_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv2i64_nxv2i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i64> @llvm.riscv.vaadd.mask.nxv2i64.i64(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64> %1,
+    i64 %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vaadd.nxv4i64.i64(
+  <vscale x 4 x i64>,
+  i64,
+  i64);
+
+define <vscale x 4 x i64> @intrinsic_vaadd_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_vx_nxv4i64_nxv4i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 4 x i64> @llvm.riscv.vaadd.nxv4i64.i64(
+    <vscale x 4 x i64> %0,
+    i64 %1,
+    i64 %2)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vaadd.mask.nxv4i64.i64(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  i64,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i64> @intrinsic_vaadd_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv4i64_nxv4i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i64> @llvm.riscv.vaadd.mask.nxv4i64.i64(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64> %1,
+    i64 %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vaadd.nxv8i64.i64(
+  <vscale x 8 x i64>,
+  i64,
+  i64);
+
+define <vscale x 8 x i64> @intrinsic_vaadd_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_vx_nxv8i64_nxv8i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 8 x i64> @llvm.riscv.vaadd.nxv8i64.i64(
+    <vscale x 8 x i64> %0,
+    i64 %1,
+    i64 %2)
+
+  ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vaadd.mask.nxv8i64.i64(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>,
+  i64,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i64> @intrinsic_vaadd_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv8i64_nxv8i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vaadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i64> @llvm.riscv.vaadd.mask.nxv8i64.i64(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i64> %1,
+    i64 %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i64> %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vaaddu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vaaddu-rv32.ll
new file mode 100644
index 000000000000..7a240b49e89d
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vaaddu-rv32.ll
@@ -0,0 +1,1441 @@
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i8> @llvm.riscv.vaaddu.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  i32);
+
+define <vscale x 1 x i8> @intrinsic_vaaddu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_vv_nxv1i8_nxv1i8_nxv1i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i8> @llvm.riscv.vaaddu.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vaaddu.mask.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i8> @intrinsic_vaaddu_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv1i8_nxv1i8_nxv1i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i8> @llvm.riscv.vaaddu.mask.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    <vscale x 1 x i8> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vaaddu.nxv2i8.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  i32);
+
+define <vscale x 2 x i8> @intrinsic_vaaddu_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_vv_nxv2i8_nxv2i8_nxv2i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i8> @llvm.riscv.vaaddu.nxv2i8.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vaaddu.mask.nxv2i8.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i8> @intrinsic_vaaddu_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv2i8_nxv2i8_nxv2i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i8> @llvm.riscv.vaaddu.mask.nxv2i8.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    <vscale x 2 x i8> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vaaddu.nxv4i8.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  i32);
+
+define <vscale x 4 x i8> @intrinsic_vaaddu_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_vv_nxv4i8_nxv4i8_nxv4i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i8> @llvm.riscv.vaaddu.nxv4i8.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vaaddu.mask.nxv4i8.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i8> @intrinsic_vaaddu_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv4i8_nxv4i8_nxv4i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i8> @llvm.riscv.vaaddu.mask.nxv4i8.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    <vscale x 4 x i8> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vaaddu.nxv8i8.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  i32);
+
+define <vscale x 8 x i8> @intrinsic_vaaddu_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_vv_nxv8i8_nxv8i8_nxv8i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i8> @llvm.riscv.vaaddu.nxv8i8.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vaaddu.mask.nxv8i8.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i8> @intrinsic_vaaddu_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv8i8_nxv8i8_nxv8i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i8> @llvm.riscv.vaaddu.mask.nxv8i8.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    <vscale x 8 x i8> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vaaddu.nxv16i8.nxv16i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  i32);
+
+define <vscale x 16 x i8> @intrinsic_vaaddu_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_vv_nxv16i8_nxv16i8_nxv16i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i8> @llvm.riscv.vaaddu.nxv16i8.nxv16i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vaaddu.mask.nxv16i8.nxv16i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i8> @intrinsic_vaaddu_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv16i8_nxv16i8_nxv16i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i8> @llvm.riscv.vaaddu.mask.nxv16i8.nxv16i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    <vscale x 16 x i8> %2,
+    <vscale x 16 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vaaddu.nxv32i8.nxv32i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  i32);
+
+define <vscale x 32 x i8> @intrinsic_vaaddu_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_vv_nxv32i8_nxv32i8_nxv32i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 32 x i8> @llvm.riscv.vaaddu.nxv32i8.nxv32i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vaaddu.mask.nxv32i8.nxv32i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  <vscale x 32 x i1>,
+  i32);
+
+define <vscale x 32 x i8> @intrinsic_vaaddu_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv32i8_nxv32i8_nxv32i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 32 x i8> @llvm.riscv.vaaddu.mask.nxv32i8.nxv32i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8> %1,
+    <vscale x 32 x i8> %2,
+    <vscale x 32 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vaaddu.nxv64i8.nxv64i8(
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  i32);
+
+define <vscale x 64 x i8> @intrinsic_vaaddu_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_vv_nxv64i8_nxv64i8_nxv64i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 64 x i8> @llvm.riscv.vaaddu.nxv64i8.nxv64i8(
+    <vscale x 64 x i8> %0,
+    <vscale x 64 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vaaddu.mask.nxv64i8.nxv64i8(
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  <vscale x 64 x i1>,
+  i32);
+
+define <vscale x 64 x i8> @intrinsic_vaaddu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv64i8_nxv64i8_nxv64i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 64 x i8> @llvm.riscv.vaaddu.mask.nxv64i8.nxv64i8(
+    <vscale x 64 x i8> %0,
+    <vscale x 64 x i8> %1,
+    <vscale x 64 x i8> %2,
+    <vscale x 64 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vaaddu.nxv1i16.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  i32);
+
+define <vscale x 1 x i16> @intrinsic_vaaddu_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_vv_nxv1i16_nxv1i16_nxv1i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i16> @llvm.riscv.vaaddu.nxv1i16.nxv1i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    i32 %2)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vaaddu.mask.nxv1i16.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i16> @intrinsic_vaaddu_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv1i16_nxv1i16_nxv1i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i16> @llvm.riscv.vaaddu.mask.nxv1i16.nxv1i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    <vscale x 1 x i16> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vaaddu.nxv2i16.nxv2i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  i32);
+
+define <vscale x 2 x i16> @intrinsic_vaaddu_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_vv_nxv2i16_nxv2i16_nxv2i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i16> @llvm.riscv.vaaddu.nxv2i16.nxv2i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %1,
+    i32 %2)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vaaddu.mask.nxv2i16.nxv2i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i16> @intrinsic_vaaddu_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv2i16_nxv2i16_nxv2i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i16> @llvm.riscv.vaaddu.mask.nxv2i16.nxv2i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %1,
+    <vscale x 2 x i16> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vaaddu.nxv4i16.nxv4i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  i32);
+
+define <vscale x 4 x i16> @intrinsic_vaaddu_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_vv_nxv4i16_nxv4i16_nxv4i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i16> @llvm.riscv.vaaddu.nxv4i16.nxv4i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %1,
+    i32 %2)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vaaddu.mask.nxv4i16.nxv4i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i16> @intrinsic_vaaddu_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv4i16_nxv4i16_nxv4i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i16> @llvm.riscv.vaaddu.mask.nxv4i16.nxv4i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %1,
+    <vscale x 4 x i16> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vaaddu.nxv8i16.nxv8i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  i32);
+
+define <vscale x 8 x i16> @intrinsic_vaaddu_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_vv_nxv8i16_nxv8i16_nxv8i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i16> @llvm.riscv.vaaddu.nxv8i16.nxv8i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %1,
+    i32 %2)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vaaddu.mask.nxv8i16.nxv8i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i16> @intrinsic_vaaddu_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv8i16_nxv8i16_nxv8i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i16> @llvm.riscv.vaaddu.mask.nxv8i16.nxv8i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %1,
+    <vscale x 8 x i16> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vaaddu.nxv16i16.nxv16i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  i32);
+
+define <vscale x 16 x i16> @intrinsic_vaaddu_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_vv_nxv16i16_nxv16i16_nxv16i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i16> @llvm.riscv.vaaddu.nxv16i16.nxv16i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %1,
+    i32 %2)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vaaddu.mask.nxv16i16.nxv16i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i16> @intrinsic_vaaddu_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv16i16_nxv16i16_nxv16i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i16> @llvm.riscv.vaaddu.mask.nxv16i16.nxv16i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %1,
+    <vscale x 16 x i16> %2,
+    <vscale x 16 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vaaddu.nxv32i16.nxv32i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  i32);
+
+define <vscale x 32 x i16> @intrinsic_vaaddu_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_vv_nxv32i16_nxv32i16_nxv32i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 32 x i16> @llvm.riscv.vaaddu.nxv32i16.nxv32i16(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16> %1,
+    i32 %2)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vaaddu.mask.nxv32i16.nxv32i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  <vscale x 32 x i1>,
+  i32);
+
+define <vscale x 32 x i16> @intrinsic_vaaddu_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv32i16_nxv32i16_nxv32i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 32 x i16> @llvm.riscv.vaaddu.mask.nxv32i16.nxv32i16(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16> %1,
+    <vscale x 32 x i16> %2,
+    <vscale x 32 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vaaddu.nxv1i32.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  i32);
+
+define <vscale x 1 x i32> @intrinsic_vaaddu_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_vv_nxv1i32_nxv1i32_nxv1i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i32> @llvm.riscv.vaaddu.nxv1i32.nxv1i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    i32 %2)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vaaddu.mask.nxv1i32.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i32> @intrinsic_vaaddu_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv1i32_nxv1i32_nxv1i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i32> @llvm.riscv.vaaddu.mask.nxv1i32.nxv1i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    <vscale x 1 x i32> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vaaddu.nxv2i32.nxv2i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  i32);
+
+define <vscale x 2 x i32> @intrinsic_vaaddu_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_vv_nxv2i32_nxv2i32_nxv2i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i32> @llvm.riscv.vaaddu.nxv2i32.nxv2i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    i32 %2)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vaaddu.mask.nxv2i32.nxv2i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i32> @intrinsic_vaaddu_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv2i32_nxv2i32_nxv2i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i32> @llvm.riscv.vaaddu.mask.nxv2i32.nxv2i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    <vscale x 2 x i32> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vaaddu.nxv4i32.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  i32);
+
+define <vscale x 4 x i32> @intrinsic_vaaddu_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_vv_nxv4i32_nxv4i32_nxv4i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i32> @llvm.riscv.vaaddu.nxv4i32.nxv4i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    i32 %2)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vaaddu.mask.nxv4i32.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i32> @intrinsic_vaaddu_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv4i32_nxv4i32_nxv4i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i32> @llvm.riscv.vaaddu.mask.nxv4i32.nxv4i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    <vscale x 4 x i32> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vaaddu.nxv8i32.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  i32);
+
+define <vscale x 8 x i32> @intrinsic_vaaddu_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_vv_nxv8i32_nxv8i32_nxv8i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i32> @llvm.riscv.vaaddu.nxv8i32.nxv8i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    i32 %2)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vaaddu.mask.nxv8i32.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i32> @intrinsic_vaaddu_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv8i32_nxv8i32_nxv8i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i32> @llvm.riscv.vaaddu.mask.nxv8i32.nxv8i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    <vscale x 8 x i32> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vaaddu.nxv16i32.nxv16i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  i32);
+
+define <vscale x 16 x i32> @intrinsic_vaaddu_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_vv_nxv16i32_nxv16i32_nxv16i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i32> @llvm.riscv.vaaddu.nxv16i32.nxv16i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    i32 %2)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vaaddu.mask.nxv16i32.nxv16i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i32> @intrinsic_vaaddu_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv16i32_nxv16i32_nxv16i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i32> @llvm.riscv.vaaddu.mask.nxv16i32.nxv16i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    <vscale x 16 x i32> %2,
+    <vscale x 16 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vaaddu.nxv1i8.i8(
+  <vscale x 1 x i8>,
+  i8,
+  i32);
+
+define <vscale x 1 x i8> @intrinsic_vaaddu_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_vx_nxv1i8_nxv1i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 1 x i8> @llvm.riscv.vaaddu.nxv1i8.i8(
+    <vscale x 1 x i8> %0,
+    i8 %1,
+    i32 %2)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vaaddu.mask.nxv1i8.i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  i8,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i8> @intrinsic_vaaddu_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv1i8_nxv1i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i8> @llvm.riscv.vaaddu.mask.nxv1i8.i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    i8 %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vaaddu.nxv2i8.i8(
+  <vscale x 2 x i8>,
+  i8,
+  i32);
+
+define <vscale x 2 x i8> @intrinsic_vaaddu_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_vx_nxv2i8_nxv2i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 2 x i8> @llvm.riscv.vaaddu.nxv2i8.i8(
+    <vscale x 2 x i8> %0,
+    i8 %1,
+    i32 %2)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vaaddu.mask.nxv2i8.i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  i8,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i8> @intrinsic_vaaddu_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv2i8_nxv2i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i8> @llvm.riscv.vaaddu.mask.nxv2i8.i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    i8 %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vaaddu.nxv4i8.i8(
+  <vscale x 4 x i8>,
+  i8,
+  i32);
+
+define <vscale x 4 x i8> @intrinsic_vaaddu_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_vx_nxv4i8_nxv4i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 4 x i8> @llvm.riscv.vaaddu.nxv4i8.i8(
+    <vscale x 4 x i8> %0,
+    i8 %1,
+    i32 %2)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vaaddu.mask.nxv4i8.i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  i8,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i8> @intrinsic_vaaddu_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv4i8_nxv4i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i8> @llvm.riscv.vaaddu.mask.nxv4i8.i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    i8 %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vaaddu.nxv8i8.i8(
+  <vscale x 8 x i8>,
+  i8,
+  i32);
+
+define <vscale x 8 x i8> @intrinsic_vaaddu_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_vx_nxv8i8_nxv8i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 8 x i8> @llvm.riscv.vaaddu.nxv8i8.i8(
+    <vscale x 8 x i8> %0,
+    i8 %1,
+    i32 %2)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vaaddu.mask.nxv8i8.i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  i8,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i8> @intrinsic_vaaddu_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv8i8_nxv8i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i8> @llvm.riscv.vaaddu.mask.nxv8i8.i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    i8 %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vaaddu.nxv16i8.i8(
+  <vscale x 16 x i8>,
+  i8,
+  i32);
+
+define <vscale x 16 x i8> @intrinsic_vaaddu_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_vx_nxv16i8_nxv16i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 16 x i8> @llvm.riscv.vaaddu.nxv16i8.i8(
+    <vscale x 16 x i8> %0,
+    i8 %1,
+    i32 %2)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vaaddu.mask.nxv16i8.i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  i8,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i8> @intrinsic_vaaddu_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv16i8_nxv16i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i8> @llvm.riscv.vaaddu.mask.nxv16i8.i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    i8 %2,
+    <vscale x 16 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vaaddu.nxv32i8.i8(
+  <vscale x 32 x i8>,
+  i8,
+  i32);
+
+define <vscale x 32 x i8> @intrinsic_vaaddu_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_vx_nxv32i8_nxv32i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 32 x i8> @llvm.riscv.vaaddu.nxv32i8.i8(
+    <vscale x 32 x i8> %0,
+    i8 %1,
+    i32 %2)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vaaddu.mask.nxv32i8.i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  i8,
+  <vscale x 32 x i1>,
+  i32);
+
+define <vscale x 32 x i8> @intrinsic_vaaddu_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv32i8_nxv32i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 32 x i8> @llvm.riscv.vaaddu.mask.nxv32i8.i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8> %1,
+    i8 %2,
+    <vscale x 32 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vaaddu.nxv64i8.i8(
+  <vscale x 64 x i8>,
+  i8,
+  i32);
+
+define <vscale x 64 x i8> @intrinsic_vaaddu_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_vx_nxv64i8_nxv64i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 64 x i8> @llvm.riscv.vaaddu.nxv64i8.i8(
+    <vscale x 64 x i8> %0,
+    i8 %1,
+    i32 %2)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vaaddu.mask.nxv64i8.i8(
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  i8,
+  <vscale x 64 x i1>,
+  i32);
+
+define <vscale x 64 x i8> @intrinsic_vaaddu_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv64i8_nxv64i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 64 x i8> @llvm.riscv.vaaddu.mask.nxv64i8.i8(
+    <vscale x 64 x i8> %0,
+    <vscale x 64 x i8> %1,
+    i8 %2,
+    <vscale x 64 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vaaddu.nxv1i16.i16(
+  <vscale x 1 x i16>,
+  i16,
+  i32);
+
+define <vscale x 1 x i16> @intrinsic_vaaddu_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_vx_nxv1i16_nxv1i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 1 x i16> @llvm.riscv.vaaddu.nxv1i16.i16(
+    <vscale x 1 x i16> %0,
+    i16 %1,
+    i32 %2)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vaaddu.mask.nxv1i16.i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  i16,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i16> @intrinsic_vaaddu_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv1i16_nxv1i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i16> @llvm.riscv.vaaddu.mask.nxv1i16.i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    i16 %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vaaddu.nxv2i16.i16(
+  <vscale x 2 x i16>,
+  i16,
+  i32);
+
+define <vscale x 2 x i16> @intrinsic_vaaddu_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_vx_nxv2i16_nxv2i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 2 x i16> @llvm.riscv.vaaddu.nxv2i16.i16(
+    <vscale x 2 x i16> %0,
+    i16 %1,
+    i32 %2)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vaaddu.mask.nxv2i16.i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  i16,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i16> @intrinsic_vaaddu_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv2i16_nxv2i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i16> @llvm.riscv.vaaddu.mask.nxv2i16.i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %1,
+    i16 %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vaaddu.nxv4i16.i16(
+  <vscale x 4 x i16>,
+  i16,
+  i32);
+
+define <vscale x 4 x i16> @intrinsic_vaaddu_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_vx_nxv4i16_nxv4i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 4 x i16> @llvm.riscv.vaaddu.nxv4i16.i16(
+    <vscale x 4 x i16> %0,
+    i16 %1,
+    i32 %2)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vaaddu.mask.nxv4i16.i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  i16,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i16> @intrinsic_vaaddu_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv4i16_nxv4i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i16> @llvm.riscv.vaaddu.mask.nxv4i16.i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %1,
+    i16 %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vaaddu.nxv8i16.i16(
+  <vscale x 8 x i16>,
+  i16,
+  i32);
+
+define <vscale x 8 x i16> @intrinsic_vaaddu_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_vx_nxv8i16_nxv8i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 8 x i16> @llvm.riscv.vaaddu.nxv8i16.i16(
+    <vscale x 8 x i16> %0,
+    i16 %1,
+    i32 %2)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vaaddu.mask.nxv8i16.i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  i16,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i16> @intrinsic_vaaddu_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv8i16_nxv8i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i16> @llvm.riscv.vaaddu.mask.nxv8i16.i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %1,
+    i16 %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vaaddu.nxv16i16.i16(
+  <vscale x 16 x i16>,
+  i16,
+  i32);
+
+define <vscale x 16 x i16> @intrinsic_vaaddu_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_vx_nxv16i16_nxv16i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 16 x i16> @llvm.riscv.vaaddu.nxv16i16.i16(
+    <vscale x 16 x i16> %0,
+    i16 %1,
+    i32 %2)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vaaddu.mask.nxv16i16.i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  i16,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i16> @intrinsic_vaaddu_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv16i16_nxv16i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i16> @llvm.riscv.vaaddu.mask.nxv16i16.i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %1,
+    i16 %2,
+    <vscale x 16 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vaaddu.nxv32i16.i16(
+  <vscale x 32 x i16>,
+  i16,
+  i32);
+
+define <vscale x 32 x i16> @intrinsic_vaaddu_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_vx_nxv32i16_nxv32i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 32 x i16> @llvm.riscv.vaaddu.nxv32i16.i16(
+    <vscale x 32 x i16> %0,
+    i16 %1,
+    i32 %2)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vaaddu.mask.nxv32i16.i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  i16,
+  <vscale x 32 x i1>,
+  i32);
+
+define <vscale x 32 x i16> @intrinsic_vaaddu_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv32i16_nxv32i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 32 x i16> @llvm.riscv.vaaddu.mask.nxv32i16.i16(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16> %1,
+    i16 %2,
+    <vscale x 32 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vaaddu.nxv1i32.i32(
+  <vscale x 1 x i32>,
+  i32,
+  i32);
+
+define <vscale x 1 x i32> @intrinsic_vaaddu_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_vx_nxv1i32_nxv1i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 1 x i32> @llvm.riscv.vaaddu.nxv1i32.i32(
+    <vscale x 1 x i32> %0,
+    i32 %1,
+    i32 %2)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vaaddu.mask.nxv1i32.i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  i32,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i32> @intrinsic_vaaddu_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv1i32_nxv1i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i32> @llvm.riscv.vaaddu.mask.nxv1i32.i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    i32 %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vaaddu.nxv2i32.i32(
+  <vscale x 2 x i32>,
+  i32,
+  i32);
+
+define <vscale x 2 x i32> @intrinsic_vaaddu_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_vx_nxv2i32_nxv2i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 2 x i32> @llvm.riscv.vaaddu.nxv2i32.i32(
+    <vscale x 2 x i32> %0,
+    i32 %1,
+    i32 %2)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vaaddu.mask.nxv2i32.i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  i32,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i32> @intrinsic_vaaddu_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv2i32_nxv2i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i32> @llvm.riscv.vaaddu.mask.nxv2i32.i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    i32 %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vaaddu.nxv4i32.i32(
+  <vscale x 4 x i32>,
+  i32,
+  i32);
+
+define <vscale x 4 x i32> @intrinsic_vaaddu_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_vx_nxv4i32_nxv4i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 4 x i32> @llvm.riscv.vaaddu.nxv4i32.i32(
+    <vscale x 4 x i32> %0,
+    i32 %1,
+    i32 %2)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vaaddu.mask.nxv4i32.i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  i32,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i32> @intrinsic_vaaddu_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv4i32_nxv4i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i32> @llvm.riscv.vaaddu.mask.nxv4i32.i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    i32 %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vaaddu.nxv8i32.i32(
+  <vscale x 8 x i32>,
+  i32,
+  i32);
+
+define <vscale x 8 x i32> @intrinsic_vaaddu_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_vx_nxv8i32_nxv8i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 8 x i32> @llvm.riscv.vaaddu.nxv8i32.i32(
+    <vscale x 8 x i32> %0,
+    i32 %1,
+    i32 %2)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vaaddu.mask.nxv8i32.i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  i32,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i32> @intrinsic_vaaddu_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv8i32_nxv8i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i32> @llvm.riscv.vaaddu.mask.nxv8i32.i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    i32 %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vaaddu.nxv16i32.i32(
+  <vscale x 16 x i32>,
+  i32,
+  i32);
+
+define <vscale x 16 x i32> @intrinsic_vaaddu_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_vx_nxv16i32_nxv16i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 16 x i32> @llvm.riscv.vaaddu.nxv16i32.i32(
+    <vscale x 16 x i32> %0,
+    i32 %1,
+    i32 %2)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vaaddu.mask.nxv16i32.i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  i32,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i32> @intrinsic_vaaddu_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv16i32_nxv16i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i32> @llvm.riscv.vaaddu.mask.nxv16i32.i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    i32 %2,
+    <vscale x 16 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 16 x i32> %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vaaddu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vaaddu-rv64.ll
new file mode 100644
index 000000000000..a3d347ab0628
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vaaddu-rv64.ll
@@ -0,0 +1,1761 @@
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i8> @llvm.riscv.vaaddu.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  i64);
+
+define <vscale x 1 x i8> @intrinsic_vaaddu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_vv_nxv1i8_nxv1i8_nxv1i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i8> @llvm.riscv.vaaddu.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vaaddu.mask.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i8> @intrinsic_vaaddu_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv1i8_nxv1i8_nxv1i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i8> @llvm.riscv.vaaddu.mask.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    <vscale x 1 x i8> %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vaaddu.nxv2i8.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  i64);
+
+define <vscale x 2 x i8> @intrinsic_vaaddu_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_vv_nxv2i8_nxv2i8_nxv2i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i8> @llvm.riscv.vaaddu.nxv2i8.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vaaddu.mask.nxv2i8.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i8> @intrinsic_vaaddu_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv2i8_nxv2i8_nxv2i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i8> @llvm.riscv.vaaddu.mask.nxv2i8.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    <vscale x 2 x i8> %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vaaddu.nxv4i8.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  i64);
+
+define <vscale x 4 x i8> @intrinsic_vaaddu_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_vv_nxv4i8_nxv4i8_nxv4i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i8> @llvm.riscv.vaaddu.nxv4i8.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vaaddu.mask.nxv4i8.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i8> @intrinsic_vaaddu_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv4i8_nxv4i8_nxv4i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i8> @llvm.riscv.vaaddu.mask.nxv4i8.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    <vscale x 4 x i8> %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vaaddu.nxv8i8.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  i64);
+
+define <vscale x 8 x i8> @intrinsic_vaaddu_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_vv_nxv8i8_nxv8i8_nxv8i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i8> @llvm.riscv.vaaddu.nxv8i8.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vaaddu.mask.nxv8i8.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i8> @intrinsic_vaaddu_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv8i8_nxv8i8_nxv8i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i8> @llvm.riscv.vaaddu.mask.nxv8i8.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    <vscale x 8 x i8> %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vaaddu.nxv16i8.nxv16i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  i64);
+
+define <vscale x 16 x i8> @intrinsic_vaaddu_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_vv_nxv16i8_nxv16i8_nxv16i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i8> @llvm.riscv.vaaddu.nxv16i8.nxv16i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vaaddu.mask.nxv16i8.nxv16i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i8> @intrinsic_vaaddu_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv16i8_nxv16i8_nxv16i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i8> @llvm.riscv.vaaddu.mask.nxv16i8.nxv16i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    <vscale x 16 x i8> %2,
+    <vscale x 16 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vaaddu.nxv32i8.nxv32i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  i64);
+
+define <vscale x 32 x i8> @intrinsic_vaaddu_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_vv_nxv32i8_nxv32i8_nxv32i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 32 x i8> @llvm.riscv.vaaddu.nxv32i8.nxv32i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vaaddu.mask.nxv32i8.nxv32i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  <vscale x 32 x i1>,
+  i64);
+
+define <vscale x 32 x i8> @intrinsic_vaaddu_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv32i8_nxv32i8_nxv32i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 32 x i8> @llvm.riscv.vaaddu.mask.nxv32i8.nxv32i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8> %1,
+    <vscale x 32 x i8> %2,
+    <vscale x 32 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vaaddu.nxv64i8.nxv64i8(
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  i64);
+
+define <vscale x 64 x i8> @intrinsic_vaaddu_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_vv_nxv64i8_nxv64i8_nxv64i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 64 x i8> @llvm.riscv.vaaddu.nxv64i8.nxv64i8(
+    <vscale x 64 x i8> %0,
+    <vscale x 64 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vaaddu.mask.nxv64i8.nxv64i8(
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  <vscale x 64 x i1>,
+  i64);
+
+define <vscale x 64 x i8> @intrinsic_vaaddu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv64i8_nxv64i8_nxv64i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 64 x i8> @llvm.riscv.vaaddu.mask.nxv64i8.nxv64i8(
+    <vscale x 64 x i8> %0,
+    <vscale x 64 x i8> %1,
+    <vscale x 64 x i8> %2,
+    <vscale x 64 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vaaddu.nxv1i16.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  i64);
+
+define <vscale x 1 x i16> @intrinsic_vaaddu_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_vv_nxv1i16_nxv1i16_nxv1i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i16> @llvm.riscv.vaaddu.nxv1i16.nxv1i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    i64 %2)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vaaddu.mask.nxv1i16.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i16> @intrinsic_vaaddu_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv1i16_nxv1i16_nxv1i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i16> @llvm.riscv.vaaddu.mask.nxv1i16.nxv1i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    <vscale x 1 x i16> %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vaaddu.nxv2i16.nxv2i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  i64);
+
+define <vscale x 2 x i16> @intrinsic_vaaddu_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_vv_nxv2i16_nxv2i16_nxv2i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i16> @llvm.riscv.vaaddu.nxv2i16.nxv2i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %1,
+    i64 %2)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vaaddu.mask.nxv2i16.nxv2i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i16> @intrinsic_vaaddu_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv2i16_nxv2i16_nxv2i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i16> @llvm.riscv.vaaddu.mask.nxv2i16.nxv2i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %1,
+    <vscale x 2 x i16> %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vaaddu.nxv4i16.nxv4i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  i64);
+
+define <vscale x 4 x i16> @intrinsic_vaaddu_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_vv_nxv4i16_nxv4i16_nxv4i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i16> @llvm.riscv.vaaddu.nxv4i16.nxv4i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %1,
+    i64 %2)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vaaddu.mask.nxv4i16.nxv4i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i16> @intrinsic_vaaddu_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv4i16_nxv4i16_nxv4i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i16> @llvm.riscv.vaaddu.mask.nxv4i16.nxv4i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %1,
+    <vscale x 4 x i16> %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vaaddu.nxv8i16.nxv8i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  i64);
+
+define <vscale x 8 x i16> @intrinsic_vaaddu_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_vv_nxv8i16_nxv8i16_nxv8i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i16> @llvm.riscv.vaaddu.nxv8i16.nxv8i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %1,
+    i64 %2)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vaaddu.mask.nxv8i16.nxv8i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i16> @intrinsic_vaaddu_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv8i16_nxv8i16_nxv8i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i16> @llvm.riscv.vaaddu.mask.nxv8i16.nxv8i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %1,
+    <vscale x 8 x i16> %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vaaddu.nxv16i16.nxv16i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  i64);
+
+define <vscale x 16 x i16> @intrinsic_vaaddu_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_vv_nxv16i16_nxv16i16_nxv16i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i16> @llvm.riscv.vaaddu.nxv16i16.nxv16i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %1,
+    i64 %2)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vaaddu.mask.nxv16i16.nxv16i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i16> @intrinsic_vaaddu_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv16i16_nxv16i16_nxv16i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i16> @llvm.riscv.vaaddu.mask.nxv16i16.nxv16i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %1,
+    <vscale x 16 x i16> %2,
+    <vscale x 16 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vaaddu.nxv32i16.nxv32i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  i64);
+
+define <vscale x 32 x i16> @intrinsic_vaaddu_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_vv_nxv32i16_nxv32i16_nxv32i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 32 x i16> @llvm.riscv.vaaddu.nxv32i16.nxv32i16(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16> %1,
+    i64 %2)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vaaddu.mask.nxv32i16.nxv32i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  <vscale x 32 x i1>,
+  i64);
+
+define <vscale x 32 x i16> @intrinsic_vaaddu_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv32i16_nxv32i16_nxv32i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 32 x i16> @llvm.riscv.vaaddu.mask.nxv32i16.nxv32i16(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16> %1,
+    <vscale x 32 x i16> %2,
+    <vscale x 32 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vaaddu.nxv1i32.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  i64);
+
+define <vscale x 1 x i32> @intrinsic_vaaddu_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_vv_nxv1i32_nxv1i32_nxv1i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i32> @llvm.riscv.vaaddu.nxv1i32.nxv1i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    i64 %2)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vaaddu.mask.nxv1i32.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i32> @intrinsic_vaaddu_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv1i32_nxv1i32_nxv1i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i32> @llvm.riscv.vaaddu.mask.nxv1i32.nxv1i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    <vscale x 1 x i32> %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vaaddu.nxv2i32.nxv2i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  i64);
+
+define <vscale x 2 x i32> @intrinsic_vaaddu_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_vv_nxv2i32_nxv2i32_nxv2i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i32> @llvm.riscv.vaaddu.nxv2i32.nxv2i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    i64 %2)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vaaddu.mask.nxv2i32.nxv2i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i32> @intrinsic_vaaddu_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv2i32_nxv2i32_nxv2i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i32> @llvm.riscv.vaaddu.mask.nxv2i32.nxv2i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    <vscale x 2 x i32> %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vaaddu.nxv4i32.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  i64);
+
+define <vscale x 4 x i32> @intrinsic_vaaddu_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_vv_nxv4i32_nxv4i32_nxv4i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i32> @llvm.riscv.vaaddu.nxv4i32.nxv4i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    i64 %2)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vaaddu.mask.nxv4i32.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i32> @intrinsic_vaaddu_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv4i32_nxv4i32_nxv4i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i32> @llvm.riscv.vaaddu.mask.nxv4i32.nxv4i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    <vscale x 4 x i32> %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vaaddu.nxv8i32.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  i64);
+
+define <vscale x 8 x i32> @intrinsic_vaaddu_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_vv_nxv8i32_nxv8i32_nxv8i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i32> @llvm.riscv.vaaddu.nxv8i32.nxv8i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    i64 %2)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vaaddu.mask.nxv8i32.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i32> @intrinsic_vaaddu_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv8i32_nxv8i32_nxv8i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i32> @llvm.riscv.vaaddu.mask.nxv8i32.nxv8i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    <vscale x 8 x i32> %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vaaddu.nxv16i32.nxv16i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  i64);
+
+define <vscale x 16 x i32> @intrinsic_vaaddu_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_vv_nxv16i32_nxv16i32_nxv16i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i32> @llvm.riscv.vaaddu.nxv16i32.nxv16i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    i64 %2)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vaaddu.mask.nxv16i32.nxv16i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i32> @intrinsic_vaaddu_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv16i32_nxv16i32_nxv16i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i32> @llvm.riscv.vaaddu.mask.nxv16i32.nxv16i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    <vscale x 16 x i32> %2,
+    <vscale x 16 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vaaddu.nxv1i64.nxv1i64(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>,
+  i64);
+
+define <vscale x 1 x i64> @intrinsic_vaaddu_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_vv_nxv1i64_nxv1i64_nxv1i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i64> @llvm.riscv.vaaddu.nxv1i64.nxv1i64(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64> %1,
+    i64 %2)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vaaddu.mask.nxv1i64.nxv1i64(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i64> @intrinsic_vaaddu_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv1i64_nxv1i64_nxv1i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i64> @llvm.riscv.vaaddu.mask.nxv1i64.nxv1i64(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64> %1,
+    <vscale x 1 x i64> %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vaaddu.nxv2i64.nxv2i64(
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>,
+  i64);
+
+define <vscale x 2 x i64> @intrinsic_vaaddu_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_vv_nxv2i64_nxv2i64_nxv2i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i64> @llvm.riscv.vaaddu.nxv2i64.nxv2i64(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64> %1,
+    i64 %2)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vaaddu.mask.nxv2i64.nxv2i64(
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i64> @intrinsic_vaaddu_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv2i64_nxv2i64_nxv2i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i64> @llvm.riscv.vaaddu.mask.nxv2i64.nxv2i64(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64> %1,
+    <vscale x 2 x i64> %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vaaddu.nxv4i64.nxv4i64(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  i64);
+
+define <vscale x 4 x i64> @intrinsic_vaaddu_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_vv_nxv4i64_nxv4i64_nxv4i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i64> @llvm.riscv.vaaddu.nxv4i64.nxv4i64(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64> %1,
+    i64 %2)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vaaddu.mask.nxv4i64.nxv4i64(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i64> @intrinsic_vaaddu_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv4i64_nxv4i64_nxv4i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i64> @llvm.riscv.vaaddu.mask.nxv4i64.nxv4i64(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64> %1,
+    <vscale x 4 x i64> %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vaaddu.nxv8i64.nxv8i64(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>,
+  i64);
+
+define <vscale x 8 x i64> @intrinsic_vaaddu_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_vv_nxv8i64_nxv8i64_nxv8i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i64> @llvm.riscv.vaaddu.nxv8i64.nxv8i64(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i64> %1,
+    i64 %2)
+
+  ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vaaddu.mask.nxv8i64.nxv8i64(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i64> @intrinsic_vaaddu_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv8i64_nxv8i64_nxv8i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vaaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i64> @llvm.riscv.vaaddu.mask.nxv8i64.nxv8i64(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i64> %1,
+    <vscale x 8 x i64> %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vaaddu.nxv1i8.i8(
+  <vscale x 1 x i8>,
+  i8,
+  i64);
+
+define <vscale x 1 x i8> @intrinsic_vaaddu_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_vx_nxv1i8_nxv1i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 1 x i8> @llvm.riscv.vaaddu.nxv1i8.i8(
+    <vscale x 1 x i8> %0,
+    i8 %1,
+    i64 %2)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vaaddu.mask.nxv1i8.i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  i8,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i8> @intrinsic_vaaddu_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv1i8_nxv1i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i8> @llvm.riscv.vaaddu.mask.nxv1i8.i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    i8 %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vaaddu.nxv2i8.i8(
+  <vscale x 2 x i8>,
+  i8,
+  i64);
+
+define <vscale x 2 x i8> @intrinsic_vaaddu_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_vx_nxv2i8_nxv2i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 2 x i8> @llvm.riscv.vaaddu.nxv2i8.i8(
+    <vscale x 2 x i8> %0,
+    i8 %1,
+    i64 %2)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vaaddu.mask.nxv2i8.i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  i8,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i8> @intrinsic_vaaddu_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv2i8_nxv2i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i8> @llvm.riscv.vaaddu.mask.nxv2i8.i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    i8 %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vaaddu.nxv4i8.i8(
+  <vscale x 4 x i8>,
+  i8,
+  i64);
+
+define <vscale x 4 x i8> @intrinsic_vaaddu_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_vx_nxv4i8_nxv4i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 4 x i8> @llvm.riscv.vaaddu.nxv4i8.i8(
+    <vscale x 4 x i8> %0,
+    i8 %1,
+    i64 %2)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vaaddu.mask.nxv4i8.i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  i8,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i8> @intrinsic_vaaddu_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv4i8_nxv4i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i8> @llvm.riscv.vaaddu.mask.nxv4i8.i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    i8 %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vaaddu.nxv8i8.i8(
+  <vscale x 8 x i8>,
+  i8,
+  i64);
+
+define <vscale x 8 x i8> @intrinsic_vaaddu_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_vx_nxv8i8_nxv8i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 8 x i8> @llvm.riscv.vaaddu.nxv8i8.i8(
+    <vscale x 8 x i8> %0,
+    i8 %1,
+    i64 %2)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vaaddu.mask.nxv8i8.i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  i8,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i8> @intrinsic_vaaddu_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv8i8_nxv8i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i8> @llvm.riscv.vaaddu.mask.nxv8i8.i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    i8 %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vaaddu.nxv16i8.i8(
+  <vscale x 16 x i8>,
+  i8,
+  i64);
+
+define <vscale x 16 x i8> @intrinsic_vaaddu_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_vx_nxv16i8_nxv16i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 16 x i8> @llvm.riscv.vaaddu.nxv16i8.i8(
+    <vscale x 16 x i8> %0,
+    i8 %1,
+    i64 %2)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vaaddu.mask.nxv16i8.i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  i8,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i8> @intrinsic_vaaddu_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv16i8_nxv16i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i8> @llvm.riscv.vaaddu.mask.nxv16i8.i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    i8 %2,
+    <vscale x 16 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vaaddu.nxv32i8.i8(
+  <vscale x 32 x i8>,
+  i8,
+  i64);
+
+define <vscale x 32 x i8> @intrinsic_vaaddu_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_vx_nxv32i8_nxv32i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 32 x i8> @llvm.riscv.vaaddu.nxv32i8.i8(
+    <vscale x 32 x i8> %0,
+    i8 %1,
+    i64 %2)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vaaddu.mask.nxv32i8.i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  i8,
+  <vscale x 32 x i1>,
+  i64);
+
+define <vscale x 32 x i8> @intrinsic_vaaddu_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv32i8_nxv32i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 32 x i8> @llvm.riscv.vaaddu.mask.nxv32i8.i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8> %1,
+    i8 %2,
+    <vscale x 32 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vaaddu.nxv64i8.i8(
+  <vscale x 64 x i8>,
+  i8,
+  i64);
+
+define <vscale x 64 x i8> @intrinsic_vaaddu_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_vx_nxv64i8_nxv64i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 64 x i8> @llvm.riscv.vaaddu.nxv64i8.i8(
+    <vscale x 64 x i8> %0,
+    i8 %1,
+    i64 %2)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vaaddu.mask.nxv64i8.i8(
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  i8,
+  <vscale x 64 x i1>,
+  i64);
+
+define <vscale x 64 x i8> @intrinsic_vaaddu_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv64i8_nxv64i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 64 x i8> @llvm.riscv.vaaddu.mask.nxv64i8.i8(
+    <vscale x 64 x i8> %0,
+    <vscale x 64 x i8> %1,
+    i8 %2,
+    <vscale x 64 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vaaddu.nxv1i16.i16(
+  <vscale x 1 x i16>,
+  i16,
+  i64);
+
+define <vscale x 1 x i16> @intrinsic_vaaddu_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_vx_nxv1i16_nxv1i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 1 x i16> @llvm.riscv.vaaddu.nxv1i16.i16(
+    <vscale x 1 x i16> %0,
+    i16 %1,
+    i64 %2)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vaaddu.mask.nxv1i16.i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  i16,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i16> @intrinsic_vaaddu_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv1i16_nxv1i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i16> @llvm.riscv.vaaddu.mask.nxv1i16.i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    i16 %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vaaddu.nxv2i16.i16(
+  <vscale x 2 x i16>,
+  i16,
+  i64);
+
+define <vscale x 2 x i16> @intrinsic_vaaddu_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_vx_nxv2i16_nxv2i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 2 x i16> @llvm.riscv.vaaddu.nxv2i16.i16(
+    <vscale x 2 x i16> %0,
+    i16 %1,
+    i64 %2)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vaaddu.mask.nxv2i16.i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  i16,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i16> @intrinsic_vaaddu_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv2i16_nxv2i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i16> @llvm.riscv.vaaddu.mask.nxv2i16.i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %1,
+    i16 %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vaaddu.nxv4i16.i16(
+  <vscale x 4 x i16>,
+  i16,
+  i64);
+
+define <vscale x 4 x i16> @intrinsic_vaaddu_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_vx_nxv4i16_nxv4i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 4 x i16> @llvm.riscv.vaaddu.nxv4i16.i16(
+    <vscale x 4 x i16> %0,
+    i16 %1,
+    i64 %2)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vaaddu.mask.nxv4i16.i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  i16,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i16> @intrinsic_vaaddu_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv4i16_nxv4i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i16> @llvm.riscv.vaaddu.mask.nxv4i16.i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %1,
+    i16 %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vaaddu.nxv8i16.i16(
+  <vscale x 8 x i16>,
+  i16,
+  i64);
+
+define <vscale x 8 x i16> @intrinsic_vaaddu_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_vx_nxv8i16_nxv8i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 8 x i16> @llvm.riscv.vaaddu.nxv8i16.i16(
+    <vscale x 8 x i16> %0,
+    i16 %1,
+    i64 %2)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vaaddu.mask.nxv8i16.i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  i16,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i16> @intrinsic_vaaddu_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv8i16_nxv8i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i16> @llvm.riscv.vaaddu.mask.nxv8i16.i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %1,
+    i16 %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vaaddu.nxv16i16.i16(
+  <vscale x 16 x i16>,
+  i16,
+  i64);
+
+define <vscale x 16 x i16> @intrinsic_vaaddu_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_vx_nxv16i16_nxv16i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 16 x i16> @llvm.riscv.vaaddu.nxv16i16.i16(
+    <vscale x 16 x i16> %0,
+    i16 %1,
+    i64 %2)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vaaddu.mask.nxv16i16.i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  i16,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i16> @intrinsic_vaaddu_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv16i16_nxv16i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i16> @llvm.riscv.vaaddu.mask.nxv16i16.i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %1,
+    i16 %2,
+    <vscale x 16 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vaaddu.nxv32i16.i16(
+  <vscale x 32 x i16>,
+  i16,
+  i64);
+
+define <vscale x 32 x i16> @intrinsic_vaaddu_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_vx_nxv32i16_nxv32i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 32 x i16> @llvm.riscv.vaaddu.nxv32i16.i16(
+    <vscale x 32 x i16> %0,
+    i16 %1,
+    i64 %2)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vaaddu.mask.nxv32i16.i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  i16,
+  <vscale x 32 x i1>,
+  i64);
+
+define <vscale x 32 x i16> @intrinsic_vaaddu_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv32i16_nxv32i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 32 x i16> @llvm.riscv.vaaddu.mask.nxv32i16.i16(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16> %1,
+    i16 %2,
+    <vscale x 32 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vaaddu.nxv1i32.i32(
+  <vscale x 1 x i32>,
+  i32,
+  i64);
+
+define <vscale x 1 x i32> @intrinsic_vaaddu_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_vx_nxv1i32_nxv1i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 1 x i32> @llvm.riscv.vaaddu.nxv1i32.i32(
+    <vscale x 1 x i32> %0,
+    i32 %1,
+    i64 %2)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vaaddu.mask.nxv1i32.i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  i32,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i32> @intrinsic_vaaddu_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv1i32_nxv1i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i32> @llvm.riscv.vaaddu.mask.nxv1i32.i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    i32 %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vaaddu.nxv2i32.i32(
+  <vscale x 2 x i32>,
+  i32,
+  i64);
+
+define <vscale x 2 x i32> @intrinsic_vaaddu_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_vx_nxv2i32_nxv2i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 2 x i32> @llvm.riscv.vaaddu.nxv2i32.i32(
+    <vscale x 2 x i32> %0,
+    i32 %1,
+    i64 %2)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vaaddu.mask.nxv2i32.i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  i32,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i32> @intrinsic_vaaddu_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv2i32_nxv2i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i32> @llvm.riscv.vaaddu.mask.nxv2i32.i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    i32 %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vaaddu.nxv4i32.i32(
+  <vscale x 4 x i32>,
+  i32,
+  i64);
+
+define <vscale x 4 x i32> @intrinsic_vaaddu_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_vx_nxv4i32_nxv4i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 4 x i32> @llvm.riscv.vaaddu.nxv4i32.i32(
+    <vscale x 4 x i32> %0,
+    i32 %1,
+    i64 %2)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vaaddu.mask.nxv4i32.i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  i32,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i32> @intrinsic_vaaddu_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv4i32_nxv4i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i32> @llvm.riscv.vaaddu.mask.nxv4i32.i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    i32 %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vaaddu.nxv8i32.i32(
+  <vscale x 8 x i32>,
+  i32,
+  i64);
+
+define <vscale x 8 x i32> @intrinsic_vaaddu_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_vx_nxv8i32_nxv8i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 8 x i32> @llvm.riscv.vaaddu.nxv8i32.i32(
+    <vscale x 8 x i32> %0,
+    i32 %1,
+    i64 %2)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vaaddu.mask.nxv8i32.i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  i32,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i32> @intrinsic_vaaddu_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv8i32_nxv8i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i32> @llvm.riscv.vaaddu.mask.nxv8i32.i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    i32 %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vaaddu.nxv16i32.i32(
+  <vscale x 16 x i32>,
+  i32,
+  i64);
+
+define <vscale x 16 x i32> @intrinsic_vaaddu_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_vx_nxv16i32_nxv16i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 16 x i32> @llvm.riscv.vaaddu.nxv16i32.i32(
+    <vscale x 16 x i32> %0,
+    i32 %1,
+    i64 %2)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vaaddu.mask.nxv16i32.i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  i32,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i32> @intrinsic_vaaddu_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv16i32_nxv16i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i32> @llvm.riscv.vaaddu.mask.nxv16i32.i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    i32 %2,
+    <vscale x 16 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vaaddu.nxv1i64.i64(
+  <vscale x 1 x i64>,
+  i64,
+  i64);
+
+define <vscale x 1 x i64> @intrinsic_vaaddu_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_vx_nxv1i64_nxv1i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 1 x i64> @llvm.riscv.vaaddu.nxv1i64.i64(
+    <vscale x 1 x i64> %0,
+    i64 %1,
+    i64 %2)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vaaddu.mask.nxv1i64.i64(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>,
+  i64,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i64> @intrinsic_vaaddu_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv1i64_nxv1i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i64> @llvm.riscv.vaaddu.mask.nxv1i64.i64(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64> %1,
+    i64 %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vaaddu.nxv2i64.i64(
+  <vscale x 2 x i64>,
+  i64,
+  i64);
+
+define <vscale x 2 x i64> @intrinsic_vaaddu_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_vx_nxv2i64_nxv2i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 2 x i64> @llvm.riscv.vaaddu.nxv2i64.i64(
+    <vscale x 2 x i64> %0,
+    i64 %1,
+    i64 %2)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vaaddu.mask.nxv2i64.i64(
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>,
+  i64,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i64> @intrinsic_vaaddu_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv2i64_nxv2i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i64> @llvm.riscv.vaaddu.mask.nxv2i64.i64(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64> %1,
+    i64 %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vaaddu.nxv4i64.i64(
+  <vscale x 4 x i64>,
+  i64,
+  i64);
+
+define <vscale x 4 x i64> @intrinsic_vaaddu_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_vx_nxv4i64_nxv4i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 4 x i64> @llvm.riscv.vaaddu.nxv4i64.i64(
+    <vscale x 4 x i64> %0,
+    i64 %1,
+    i64 %2)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vaaddu.mask.nxv4i64.i64(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  i64,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i64> @intrinsic_vaaddu_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv4i64_nxv4i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i64> @llvm.riscv.vaaddu.mask.nxv4i64.i64(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64> %1,
+    i64 %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vaaddu.nxv8i64.i64(
+  <vscale x 8 x i64>,
+  i64,
+  i64);
+
+define <vscale x 8 x i64> @intrinsic_vaaddu_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_vx_nxv8i64_nxv8i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 8 x i64> @llvm.riscv.vaaddu.nxv8i64.i64(
+    <vscale x 8 x i64> %0,
+    i64 %1,
+    i64 %2)
+
+  ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vaaddu.mask.nxv8i64.i64(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>,
+  i64,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i64> @intrinsic_vaaddu_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv8i64_nxv8i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vaaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i64> @llvm.riscv.vaaddu.mask.nxv8i64.i64(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i64> %1,
+    i64 %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i64> %a
+}

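A minimal sketch of how the unmasked and masked forms compose, using the nxv1i8 vaaddu declarations from the test above. This is an illustrative sketch, not part of the patch: the helper name is hypothetical, the signatures are copied verbatim from the declarations, and an rv64 target (i64 vl operand) is assumed.

declare <vscale x 1 x i8> @llvm.riscv.vaaddu.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  i64)

declare <vscale x 1 x i8> @llvm.riscv.vaaddu.mask.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  i64)

; Hypothetical helper: average %x with %y everywhere, then average that result
; with %z only where %m is set. The first argument of the masked intrinsic is
; the maskedoff operand, so lanes where %m is clear keep %avg0 while active
; lanes receive the second average.
define <vscale x 1 x i8> @masked_running_average(<vscale x 1 x i8> %x, <vscale x 1 x i8> %y, <vscale x 1 x i8> %z, <vscale x 1 x i1> %m, i64 %vl) {
entry:
  %avg0 = call <vscale x 1 x i8> @llvm.riscv.vaaddu.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %x,
    <vscale x 1 x i8> %y,
    i64 %vl)
  %avg1 = call <vscale x 1 x i8> @llvm.riscv.vaaddu.mask.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %avg0,
    <vscale x 1 x i8> %avg0,
    <vscale x 1 x i8> %z,
    <vscale x 1 x i1> %m,
    i64 %vl)
  ret <vscale x 1 x i8> %avg1
}
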
diff  --git a/llvm/test/CodeGen/RISCV/rvv/vasub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vasub-rv32.ll
new file mode 100644
index 000000000000..d47b8664ddda
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vasub-rv32.ll
@@ -0,0 +1,1441 @@
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i8> @llvm.riscv.vasub.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  i32);
+
+define <vscale x 1 x i8> @intrinsic_vasub_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_vv_nxv1i8_nxv1i8_nxv1i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i8> @llvm.riscv.vasub.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vasub.mask.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i8> @intrinsic_vasub_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv1i8_nxv1i8_nxv1i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i8> @llvm.riscv.vasub.mask.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    <vscale x 1 x i8> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vasub.nxv2i8.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  i32);
+
+define <vscale x 2 x i8> @intrinsic_vasub_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_vv_nxv2i8_nxv2i8_nxv2i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i8> @llvm.riscv.vasub.nxv2i8.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vasub.mask.nxv2i8.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i8> @intrinsic_vasub_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv2i8_nxv2i8_nxv2i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i8> @llvm.riscv.vasub.mask.nxv2i8.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    <vscale x 2 x i8> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vasub.nxv4i8.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  i32);
+
+define <vscale x 4 x i8> @intrinsic_vasub_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_vv_nxv4i8_nxv4i8_nxv4i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i8> @llvm.riscv.vasub.nxv4i8.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vasub.mask.nxv4i8.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i8> @intrinsic_vasub_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv4i8_nxv4i8_nxv4i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i8> @llvm.riscv.vasub.mask.nxv4i8.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    <vscale x 4 x i8> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vasub.nxv8i8.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  i32);
+
+define <vscale x 8 x i8> @intrinsic_vasub_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_vv_nxv8i8_nxv8i8_nxv8i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i8> @llvm.riscv.vasub.nxv8i8.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vasub.mask.nxv8i8.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i8> @intrinsic_vasub_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv8i8_nxv8i8_nxv8i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i8> @llvm.riscv.vasub.mask.nxv8i8.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    <vscale x 8 x i8> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vasub.nxv16i8.nxv16i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  i32);
+
+define <vscale x 16 x i8> @intrinsic_vasub_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_vv_nxv16i8_nxv16i8_nxv16i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i8> @llvm.riscv.vasub.nxv16i8.nxv16i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vasub.mask.nxv16i8.nxv16i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i8> @intrinsic_vasub_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv16i8_nxv16i8_nxv16i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i8> @llvm.riscv.vasub.mask.nxv16i8.nxv16i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    <vscale x 16 x i8> %2,
+    <vscale x 16 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vasub.nxv32i8.nxv32i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  i32);
+
+define <vscale x 32 x i8> @intrinsic_vasub_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_vv_nxv32i8_nxv32i8_nxv32i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 32 x i8> @llvm.riscv.vasub.nxv32i8.nxv32i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vasub.mask.nxv32i8.nxv32i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  <vscale x 32 x i1>,
+  i32);
+
+define <vscale x 32 x i8> @intrinsic_vasub_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv32i8_nxv32i8_nxv32i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 32 x i8> @llvm.riscv.vasub.mask.nxv32i8.nxv32i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8> %1,
+    <vscale x 32 x i8> %2,
+    <vscale x 32 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vasub.nxv64i8.nxv64i8(
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  i32);
+
+define <vscale x 64 x i8> @intrinsic_vasub_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_vv_nxv64i8_nxv64i8_nxv64i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 64 x i8> @llvm.riscv.vasub.nxv64i8.nxv64i8(
+    <vscale x 64 x i8> %0,
+    <vscale x 64 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vasub.mask.nxv64i8.nxv64i8(
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  <vscale x 64 x i1>,
+  i32);
+
+define <vscale x 64 x i8> @intrinsic_vasub_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv64i8_nxv64i8_nxv64i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 64 x i8> @llvm.riscv.vasub.mask.nxv64i8.nxv64i8(
+    <vscale x 64 x i8> %0,
+    <vscale x 64 x i8> %1,
+    <vscale x 64 x i8> %2,
+    <vscale x 64 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vasub.nxv1i16.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  i32);
+
+define <vscale x 1 x i16> @intrinsic_vasub_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_vv_nxv1i16_nxv1i16_nxv1i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i16> @llvm.riscv.vasub.nxv1i16.nxv1i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    i32 %2)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vasub.mask.nxv1i16.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i16> @intrinsic_vasub_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv1i16_nxv1i16_nxv1i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i16> @llvm.riscv.vasub.mask.nxv1i16.nxv1i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    <vscale x 1 x i16> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vasub.nxv2i16.nxv2i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  i32);
+
+define <vscale x 2 x i16> @intrinsic_vasub_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_vv_nxv2i16_nxv2i16_nxv2i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i16> @llvm.riscv.vasub.nxv2i16.nxv2i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %1,
+    i32 %2)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vasub.mask.nxv2i16.nxv2i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i16> @intrinsic_vasub_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv2i16_nxv2i16_nxv2i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i16> @llvm.riscv.vasub.mask.nxv2i16.nxv2i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %1,
+    <vscale x 2 x i16> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vasub.nxv4i16.nxv4i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  i32);
+
+define <vscale x 4 x i16> @intrinsic_vasub_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_vv_nxv4i16_nxv4i16_nxv4i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i16> @llvm.riscv.vasub.nxv4i16.nxv4i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %1,
+    i32 %2)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vasub.mask.nxv4i16.nxv4i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i16> @intrinsic_vasub_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv4i16_nxv4i16_nxv4i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i16> @llvm.riscv.vasub.mask.nxv4i16.nxv4i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %1,
+    <vscale x 4 x i16> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vasub.nxv8i16.nxv8i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  i32);
+
+define <vscale x 8 x i16> @intrinsic_vasub_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_vv_nxv8i16_nxv8i16_nxv8i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i16> @llvm.riscv.vasub.nxv8i16.nxv8i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %1,
+    i32 %2)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vasub.mask.nxv8i16.nxv8i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i16> @intrinsic_vasub_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv8i16_nxv8i16_nxv8i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i16> @llvm.riscv.vasub.mask.nxv8i16.nxv8i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %1,
+    <vscale x 8 x i16> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vasub.nxv16i16.nxv16i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  i32);
+
+define <vscale x 16 x i16> @intrinsic_vasub_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_vv_nxv16i16_nxv16i16_nxv16i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i16> @llvm.riscv.vasub.nxv16i16.nxv16i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %1,
+    i32 %2)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vasub.mask.nxv16i16.nxv16i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i16> @intrinsic_vasub_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv16i16_nxv16i16_nxv16i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i16> @llvm.riscv.vasub.mask.nxv16i16.nxv16i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %1,
+    <vscale x 16 x i16> %2,
+    <vscale x 16 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vasub.nxv32i16.nxv32i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  i32);
+
+define <vscale x 32 x i16> @intrinsic_vasub_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_vv_nxv32i16_nxv32i16_nxv32i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 32 x i16> @llvm.riscv.vasub.nxv32i16.nxv32i16(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16> %1,
+    i32 %2)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vasub.mask.nxv32i16.nxv32i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  <vscale x 32 x i1>,
+  i32);
+
+define <vscale x 32 x i16> @intrinsic_vasub_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv32i16_nxv32i16_nxv32i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 32 x i16> @llvm.riscv.vasub.mask.nxv32i16.nxv32i16(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16> %1,
+    <vscale x 32 x i16> %2,
+    <vscale x 32 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vasub.nxv1i32.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  i32);
+
+define <vscale x 1 x i32> @intrinsic_vasub_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_vv_nxv1i32_nxv1i32_nxv1i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i32> @llvm.riscv.vasub.nxv1i32.nxv1i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    i32 %2)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vasub.mask.nxv1i32.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i32> @intrinsic_vasub_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv1i32_nxv1i32_nxv1i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i32> @llvm.riscv.vasub.mask.nxv1i32.nxv1i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    <vscale x 1 x i32> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vasub.nxv2i32.nxv2i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  i32);
+
+define <vscale x 2 x i32> @intrinsic_vasub_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_vv_nxv2i32_nxv2i32_nxv2i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i32> @llvm.riscv.vasub.nxv2i32.nxv2i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    i32 %2)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vasub.mask.nxv2i32.nxv2i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i32> @intrinsic_vasub_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv2i32_nxv2i32_nxv2i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i32> @llvm.riscv.vasub.mask.nxv2i32.nxv2i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    <vscale x 2 x i32> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vasub.nxv4i32.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  i32);
+
+define <vscale x 4 x i32> @intrinsic_vasub_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_vv_nxv4i32_nxv4i32_nxv4i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i32> @llvm.riscv.vasub.nxv4i32.nxv4i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    i32 %2)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vasub.mask.nxv4i32.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i32> @intrinsic_vasub_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv4i32_nxv4i32_nxv4i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i32> @llvm.riscv.vasub.mask.nxv4i32.nxv4i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    <vscale x 4 x i32> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vasub.nxv8i32.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  i32);
+
+define <vscale x 8 x i32> @intrinsic_vasub_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_vv_nxv8i32_nxv8i32_nxv8i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i32> @llvm.riscv.vasub.nxv8i32.nxv8i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    i32 %2)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vasub.mask.nxv8i32.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i32> @intrinsic_vasub_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv8i32_nxv8i32_nxv8i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i32> @llvm.riscv.vasub.mask.nxv8i32.nxv8i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    <vscale x 8 x i32> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vasub.nxv16i32.nxv16i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  i32);
+
+define <vscale x 16 x i32> @intrinsic_vasub_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_vv_nxv16i32_nxv16i32_nxv16i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i32> @llvm.riscv.vasub.nxv16i32.nxv16i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    i32 %2)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vasub.mask.nxv16i32.nxv16i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i32> @intrinsic_vasub_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv16i32_nxv16i32_nxv16i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i32> @llvm.riscv.vasub.mask.nxv16i32.nxv16i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    <vscale x 16 x i32> %2,
+    <vscale x 16 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vasub.nxv1i8.i8(
+  <vscale x 1 x i8>,
+  i8,
+  i32);
+
+define <vscale x 1 x i8> @intrinsic_vasub_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_vx_nxv1i8_nxv1i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 1 x i8> @llvm.riscv.vasub.nxv1i8.i8(
+    <vscale x 1 x i8> %0,
+    i8 %1,
+    i32 %2)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vasub.mask.nxv1i8.i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  i8,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i8> @intrinsic_vasub_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv1i8_nxv1i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i8> @llvm.riscv.vasub.mask.nxv1i8.i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    i8 %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vasub.nxv2i8.i8(
+  <vscale x 2 x i8>,
+  i8,
+  i32);
+
+define <vscale x 2 x i8> @intrinsic_vasub_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_vx_nxv2i8_nxv2i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 2 x i8> @llvm.riscv.vasub.nxv2i8.i8(
+    <vscale x 2 x i8> %0,
+    i8 %1,
+    i32 %2)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vasub.mask.nxv2i8.i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  i8,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i8> @intrinsic_vasub_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv2i8_nxv2i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i8> @llvm.riscv.vasub.mask.nxv2i8.i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    i8 %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vasub.nxv4i8.i8(
+  <vscale x 4 x i8>,
+  i8,
+  i32);
+
+define <vscale x 4 x i8> @intrinsic_vasub_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_vx_nxv4i8_nxv4i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 4 x i8> @llvm.riscv.vasub.nxv4i8.i8(
+    <vscale x 4 x i8> %0,
+    i8 %1,
+    i32 %2)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vasub.mask.nxv4i8.i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  i8,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i8> @intrinsic_vasub_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv4i8_nxv4i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i8> @llvm.riscv.vasub.mask.nxv4i8.i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    i8 %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vasub.nxv8i8.i8(
+  <vscale x 8 x i8>,
+  i8,
+  i32);
+
+define <vscale x 8 x i8> @intrinsic_vasub_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_vx_nxv8i8_nxv8i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 8 x i8> @llvm.riscv.vasub.nxv8i8.i8(
+    <vscale x 8 x i8> %0,
+    i8 %1,
+    i32 %2)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vasub.mask.nxv8i8.i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  i8,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i8> @intrinsic_vasub_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv8i8_nxv8i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i8> @llvm.riscv.vasub.mask.nxv8i8.i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    i8 %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vasub.nxv16i8.i8(
+  <vscale x 16 x i8>,
+  i8,
+  i32);
+
+define <vscale x 16 x i8> @intrinsic_vasub_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_vx_nxv16i8_nxv16i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 16 x i8> @llvm.riscv.vasub.nxv16i8.i8(
+    <vscale x 16 x i8> %0,
+    i8 %1,
+    i32 %2)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vasub.mask.nxv16i8.i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  i8,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i8> @intrinsic_vasub_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv16i8_nxv16i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i8> @llvm.riscv.vasub.mask.nxv16i8.i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    i8 %2,
+    <vscale x 16 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vasub.nxv32i8.i8(
+  <vscale x 32 x i8>,
+  i8,
+  i32);
+
+define <vscale x 32 x i8> @intrinsic_vasub_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_vx_nxv32i8_nxv32i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 32 x i8> @llvm.riscv.vasub.nxv32i8.i8(
+    <vscale x 32 x i8> %0,
+    i8 %1,
+    i32 %2)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vasub.mask.nxv32i8.i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  i8,
+  <vscale x 32 x i1>,
+  i32);
+
+define <vscale x 32 x i8> @intrinsic_vasub_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv32i8_nxv32i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 32 x i8> @llvm.riscv.vasub.mask.nxv32i8.i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8> %1,
+    i8 %2,
+    <vscale x 32 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vasub.nxv64i8.i8(
+  <vscale x 64 x i8>,
+  i8,
+  i32);
+
+define <vscale x 64 x i8> @intrinsic_vasub_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_vx_nxv64i8_nxv64i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 64 x i8> @llvm.riscv.vasub.nxv64i8.i8(
+    <vscale x 64 x i8> %0,
+    i8 %1,
+    i32 %2)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vasub.mask.nxv64i8.i8(
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  i8,
+  <vscale x 64 x i1>,
+  i32);
+
+define <vscale x 64 x i8> @intrinsic_vasub_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv64i8_nxv64i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 64 x i8> @llvm.riscv.vasub.mask.nxv64i8.i8(
+    <vscale x 64 x i8> %0,
+    <vscale x 64 x i8> %1,
+    i8 %2,
+    <vscale x 64 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vasub.nxv1i16.i16(
+  <vscale x 1 x i16>,
+  i16,
+  i32);
+
+define <vscale x 1 x i16> @intrinsic_vasub_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_vx_nxv1i16_nxv1i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 1 x i16> @llvm.riscv.vasub.nxv1i16.i16(
+    <vscale x 1 x i16> %0,
+    i16 %1,
+    i32 %2)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vasub.mask.nxv1i16.i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  i16,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i16> @intrinsic_vasub_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv1i16_nxv1i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i16> @llvm.riscv.vasub.mask.nxv1i16.i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    i16 %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vasub.nxv2i16.i16(
+  <vscale x 2 x i16>,
+  i16,
+  i32);
+
+define <vscale x 2 x i16> @intrinsic_vasub_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_vx_nxv2i16_nxv2i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 2 x i16> @llvm.riscv.vasub.nxv2i16.i16(
+    <vscale x 2 x i16> %0,
+    i16 %1,
+    i32 %2)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vasub.mask.nxv2i16.i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  i16,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i16> @intrinsic_vasub_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv2i16_nxv2i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i16> @llvm.riscv.vasub.mask.nxv2i16.i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %1,
+    i16 %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vasub.nxv4i16.i16(
+  <vscale x 4 x i16>,
+  i16,
+  i32);
+
+define <vscale x 4 x i16> @intrinsic_vasub_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_vx_nxv4i16_nxv4i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 4 x i16> @llvm.riscv.vasub.nxv4i16.i16(
+    <vscale x 4 x i16> %0,
+    i16 %1,
+    i32 %2)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vasub.mask.nxv4i16.i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  i16,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i16> @intrinsic_vasub_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv4i16_nxv4i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i16> @llvm.riscv.vasub.mask.nxv4i16.i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %1,
+    i16 %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vasub.nxv8i16.i16(
+  <vscale x 8 x i16>,
+  i16,
+  i32);
+
+define <vscale x 8 x i16> @intrinsic_vasub_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_vx_nxv8i16_nxv8i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 8 x i16> @llvm.riscv.vasub.nxv8i16.i16(
+    <vscale x 8 x i16> %0,
+    i16 %1,
+    i32 %2)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vasub.mask.nxv8i16.i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  i16,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i16> @intrinsic_vasub_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv8i16_nxv8i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i16> @llvm.riscv.vasub.mask.nxv8i16.i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %1,
+    i16 %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vasub.nxv16i16.i16(
+  <vscale x 16 x i16>,
+  i16,
+  i32);
+
+define <vscale x 16 x i16> @intrinsic_vasub_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_vx_nxv16i16_nxv16i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 16 x i16> @llvm.riscv.vasub.nxv16i16.i16(
+    <vscale x 16 x i16> %0,
+    i16 %1,
+    i32 %2)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vasub.mask.nxv16i16.i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  i16,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i16> @intrinsic_vasub_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv16i16_nxv16i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i16> @llvm.riscv.vasub.mask.nxv16i16.i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %1,
+    i16 %2,
+    <vscale x 16 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vasub.nxv32i16.i16(
+  <vscale x 32 x i16>,
+  i16,
+  i32);
+
+define <vscale x 32 x i16> @intrinsic_vasub_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_vx_nxv32i16_nxv32i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 32 x i16> @llvm.riscv.vasub.nxv32i16.i16(
+    <vscale x 32 x i16> %0,
+    i16 %1,
+    i32 %2)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vasub.mask.nxv32i16.i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  i16,
+  <vscale x 32 x i1>,
+  i32);
+
+define <vscale x 32 x i16> @intrinsic_vasub_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv32i16_nxv32i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 32 x i16> @llvm.riscv.vasub.mask.nxv32i16.i16(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16> %1,
+    i16 %2,
+    <vscale x 32 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vasub.nxv1i32.i32(
+  <vscale x 1 x i32>,
+  i32,
+  i32);
+
+define <vscale x 1 x i32> @intrinsic_vasub_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_vx_nxv1i32_nxv1i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 1 x i32> @llvm.riscv.vasub.nxv1i32.i32(
+    <vscale x 1 x i32> %0,
+    i32 %1,
+    i32 %2)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vasub.mask.nxv1i32.i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  i32,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i32> @intrinsic_vasub_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv1i32_nxv1i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i32> @llvm.riscv.vasub.mask.nxv1i32.i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    i32 %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vasub.nxv2i32.i32(
+  <vscale x 2 x i32>,
+  i32,
+  i32);
+
+define <vscale x 2 x i32> @intrinsic_vasub_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_vx_nxv2i32_nxv2i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 2 x i32> @llvm.riscv.vasub.nxv2i32.i32(
+    <vscale x 2 x i32> %0,
+    i32 %1,
+    i32 %2)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vasub.mask.nxv2i32.i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  i32,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i32> @intrinsic_vasub_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv2i32_nxv2i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i32> @llvm.riscv.vasub.mask.nxv2i32.i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    i32 %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vasub.nxv4i32.i32(
+  <vscale x 4 x i32>,
+  i32,
+  i32);
+
+define <vscale x 4 x i32> @intrinsic_vasub_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_vx_nxv4i32_nxv4i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 4 x i32> @llvm.riscv.vasub.nxv4i32.i32(
+    <vscale x 4 x i32> %0,
+    i32 %1,
+    i32 %2)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vasub.mask.nxv4i32.i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  i32,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i32> @intrinsic_vasub_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv4i32_nxv4i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i32> @llvm.riscv.vasub.mask.nxv4i32.i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    i32 %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vasub.nxv8i32.i32(
+  <vscale x 8 x i32>,
+  i32,
+  i32);
+
+define <vscale x 8 x i32> @intrinsic_vasub_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_vx_nxv8i32_nxv8i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 8 x i32> @llvm.riscv.vasub.nxv8i32.i32(
+    <vscale x 8 x i32> %0,
+    i32 %1,
+    i32 %2)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vasub.mask.nxv8i32.i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  i32,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i32> @intrinsic_vasub_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv8i32_nxv8i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i32> @llvm.riscv.vasub.mask.nxv8i32.i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    i32 %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vasub.nxv16i32.i32(
+  <vscale x 16 x i32>,
+  i32,
+  i32);
+
+define <vscale x 16 x i32> @intrinsic_vasub_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_vx_nxv16i32_nxv16i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 16 x i32> @llvm.riscv.vasub.nxv16i32.i32(
+    <vscale x 16 x i32> %0,
+    i32 %1,
+    i32 %2)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vasub.mask.nxv16i32.i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  i32,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i32> @intrinsic_vasub_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv16i32_nxv16i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i32> @llvm.riscv.vasub.mask.nxv16i32.i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    i32 %2,
+    <vscale x 16 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 16 x i32> %a
+}

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vasub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vasub-rv64.ll
new file mode 100644
index 000000000000..c71df1577481
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vasub-rv64.ll
@@ -0,0 +1,1761 @@
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i8> @llvm.riscv.vasub.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  i64);
+
+define <vscale x 1 x i8> @intrinsic_vasub_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_vv_nxv1i8_nxv1i8_nxv1i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i8> @llvm.riscv.vasub.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vasub.mask.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i8> @intrinsic_vasub_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv1i8_nxv1i8_nxv1i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i8> @llvm.riscv.vasub.mask.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    <vscale x 1 x i8> %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vasub.nxv2i8.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  i64);
+
+define <vscale x 2 x i8> @intrinsic_vasub_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_vv_nxv2i8_nxv2i8_nxv2i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i8> @llvm.riscv.vasub.nxv2i8.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vasub.mask.nxv2i8.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i8> @intrinsic_vasub_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv2i8_nxv2i8_nxv2i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i8> @llvm.riscv.vasub.mask.nxv2i8.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    <vscale x 2 x i8> %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vasub.nxv4i8.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  i64);
+
+define <vscale x 4 x i8> @intrinsic_vasub_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_vv_nxv4i8_nxv4i8_nxv4i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i8> @llvm.riscv.vasub.nxv4i8.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vasub.mask.nxv4i8.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i8> @intrinsic_vasub_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv4i8_nxv4i8_nxv4i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i8> @llvm.riscv.vasub.mask.nxv4i8.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    <vscale x 4 x i8> %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vasub.nxv8i8.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  i64);
+
+define <vscale x 8 x i8> @intrinsic_vasub_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_vv_nxv8i8_nxv8i8_nxv8i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i8> @llvm.riscv.vasub.nxv8i8.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vasub.mask.nxv8i8.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i8> @intrinsic_vasub_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv8i8_nxv8i8_nxv8i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i8> @llvm.riscv.vasub.mask.nxv8i8.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    <vscale x 8 x i8> %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vasub.nxv16i8.nxv16i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  i64);
+
+define <vscale x 16 x i8> @intrinsic_vasub_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_vv_nxv16i8_nxv16i8_nxv16i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i8> @llvm.riscv.vasub.nxv16i8.nxv16i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vasub.mask.nxv16i8.nxv16i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i8> @intrinsic_vasub_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv16i8_nxv16i8_nxv16i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i8> @llvm.riscv.vasub.mask.nxv16i8.nxv16i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    <vscale x 16 x i8> %2,
+    <vscale x 16 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vasub.nxv32i8.nxv32i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  i64);
+
+define <vscale x 32 x i8> @intrinsic_vasub_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_vv_nxv32i8_nxv32i8_nxv32i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 32 x i8> @llvm.riscv.vasub.nxv32i8.nxv32i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vasub.mask.nxv32i8.nxv32i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  <vscale x 32 x i1>,
+  i64);
+
+define <vscale x 32 x i8> @intrinsic_vasub_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv32i8_nxv32i8_nxv32i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 32 x i8> @llvm.riscv.vasub.mask.nxv32i8.nxv32i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8> %1,
+    <vscale x 32 x i8> %2,
+    <vscale x 32 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vasub.nxv64i8.nxv64i8(
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  i64);
+
+define <vscale x 64 x i8> @intrinsic_vasub_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_vv_nxv64i8_nxv64i8_nxv64i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 64 x i8> @llvm.riscv.vasub.nxv64i8.nxv64i8(
+    <vscale x 64 x i8> %0,
+    <vscale x 64 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vasub.mask.nxv64i8.nxv64i8(
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  <vscale x 64 x i1>,
+  i64);
+
+define <vscale x 64 x i8> @intrinsic_vasub_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv64i8_nxv64i8_nxv64i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 64 x i8> @llvm.riscv.vasub.mask.nxv64i8.nxv64i8(
+    <vscale x 64 x i8> %0,
+    <vscale x 64 x i8> %1,
+    <vscale x 64 x i8> %2,
+    <vscale x 64 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vasub.nxv1i16.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  i64);
+
+define <vscale x 1 x i16> @intrinsic_vasub_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_vv_nxv1i16_nxv1i16_nxv1i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i16> @llvm.riscv.vasub.nxv1i16.nxv1i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    i64 %2)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vasub.mask.nxv1i16.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i16> @intrinsic_vasub_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv1i16_nxv1i16_nxv1i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i16> @llvm.riscv.vasub.mask.nxv1i16.nxv1i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    <vscale x 1 x i16> %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vasub.nxv2i16.nxv2i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  i64);
+
+define <vscale x 2 x i16> @intrinsic_vasub_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_vv_nxv2i16_nxv2i16_nxv2i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i16> @llvm.riscv.vasub.nxv2i16.nxv2i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %1,
+    i64 %2)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vasub.mask.nxv2i16.nxv2i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i16> @intrinsic_vasub_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv2i16_nxv2i16_nxv2i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i16> @llvm.riscv.vasub.mask.nxv2i16.nxv2i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %1,
+    <vscale x 2 x i16> %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vasub.nxv4i16.nxv4i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  i64);
+
+define <vscale x 4 x i16> @intrinsic_vasub_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_vv_nxv4i16_nxv4i16_nxv4i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i16> @llvm.riscv.vasub.nxv4i16.nxv4i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %1,
+    i64 %2)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vasub.mask.nxv4i16.nxv4i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i16> @intrinsic_vasub_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv4i16_nxv4i16_nxv4i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i16> @llvm.riscv.vasub.mask.nxv4i16.nxv4i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %1,
+    <vscale x 4 x i16> %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vasub.nxv8i16.nxv8i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  i64);
+
+define <vscale x 8 x i16> @intrinsic_vasub_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_vv_nxv8i16_nxv8i16_nxv8i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i16> @llvm.riscv.vasub.nxv8i16.nxv8i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %1,
+    i64 %2)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vasub.mask.nxv8i16.nxv8i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i16> @intrinsic_vasub_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv8i16_nxv8i16_nxv8i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i16> @llvm.riscv.vasub.mask.nxv8i16.nxv8i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %1,
+    <vscale x 8 x i16> %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vasub.nxv16i16.nxv16i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  i64);
+
+define <vscale x 16 x i16> @intrinsic_vasub_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_vv_nxv16i16_nxv16i16_nxv16i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i16> @llvm.riscv.vasub.nxv16i16.nxv16i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %1,
+    i64 %2)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vasub.mask.nxv16i16.nxv16i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i16> @intrinsic_vasub_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv16i16_nxv16i16_nxv16i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i16> @llvm.riscv.vasub.mask.nxv16i16.nxv16i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %1,
+    <vscale x 16 x i16> %2,
+    <vscale x 16 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vasub.nxv32i16.nxv32i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  i64);
+
+define <vscale x 32 x i16> @intrinsic_vasub_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_vv_nxv32i16_nxv32i16_nxv32i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 32 x i16> @llvm.riscv.vasub.nxv32i16.nxv32i16(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16> %1,
+    i64 %2)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vasub.mask.nxv32i16.nxv32i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  <vscale x 32 x i1>,
+  i64);
+
+define <vscale x 32 x i16> @intrinsic_vasub_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv32i16_nxv32i16_nxv32i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 32 x i16> @llvm.riscv.vasub.mask.nxv32i16.nxv32i16(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16> %1,
+    <vscale x 32 x i16> %2,
+    <vscale x 32 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vasub.nxv1i32.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  i64);
+
+define <vscale x 1 x i32> @intrinsic_vasub_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_vv_nxv1i32_nxv1i32_nxv1i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i32> @llvm.riscv.vasub.nxv1i32.nxv1i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    i64 %2)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vasub.mask.nxv1i32.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i32> @intrinsic_vasub_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv1i32_nxv1i32_nxv1i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i32> @llvm.riscv.vasub.mask.nxv1i32.nxv1i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    <vscale x 1 x i32> %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vasub.nxv2i32.nxv2i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  i64);
+
+define <vscale x 2 x i32> @intrinsic_vasub_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_vv_nxv2i32_nxv2i32_nxv2i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i32> @llvm.riscv.vasub.nxv2i32.nxv2i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    i64 %2)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vasub.mask.nxv2i32.nxv2i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i32> @intrinsic_vasub_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv2i32_nxv2i32_nxv2i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i32> @llvm.riscv.vasub.mask.nxv2i32.nxv2i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    <vscale x 2 x i32> %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vasub.nxv4i32.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  i64);
+
+define <vscale x 4 x i32> @intrinsic_vasub_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_vv_nxv4i32_nxv4i32_nxv4i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i32> @llvm.riscv.vasub.nxv4i32.nxv4i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    i64 %2)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vasub.mask.nxv4i32.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i32> @intrinsic_vasub_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv4i32_nxv4i32_nxv4i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i32> @llvm.riscv.vasub.mask.nxv4i32.nxv4i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    <vscale x 4 x i32> %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vasub.nxv8i32.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  i64);
+
+define <vscale x 8 x i32> @intrinsic_vasub_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_vv_nxv8i32_nxv8i32_nxv8i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i32> @llvm.riscv.vasub.nxv8i32.nxv8i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    i64 %2)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vasub.mask.nxv8i32.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i32> @intrinsic_vasub_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv8i32_nxv8i32_nxv8i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i32> @llvm.riscv.vasub.mask.nxv8i32.nxv8i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    <vscale x 8 x i32> %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vasub.nxv16i32.nxv16i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  i64);
+
+define <vscale x 16 x i32> @intrinsic_vasub_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_vv_nxv16i32_nxv16i32_nxv16i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i32> @llvm.riscv.vasub.nxv16i32.nxv16i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    i64 %2)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vasub.mask.nxv16i32.nxv16i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i32> @intrinsic_vasub_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv16i32_nxv16i32_nxv16i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i32> @llvm.riscv.vasub.mask.nxv16i32.nxv16i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    <vscale x 16 x i32> %2,
+    <vscale x 16 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vasub.nxv1i64.nxv1i64(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>,
+  i64);
+
+define <vscale x 1 x i64> @intrinsic_vasub_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_vv_nxv1i64_nxv1i64_nxv1i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i64> @llvm.riscv.vasub.nxv1i64.nxv1i64(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64> %1,
+    i64 %2)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vasub.mask.nxv1i64.nxv1i64(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i64> @intrinsic_vasub_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv1i64_nxv1i64_nxv1i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i64> @llvm.riscv.vasub.mask.nxv1i64.nxv1i64(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64> %1,
+    <vscale x 1 x i64> %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vasub.nxv2i64.nxv2i64(
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>,
+  i64);
+
+define <vscale x 2 x i64> @intrinsic_vasub_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_vv_nxv2i64_nxv2i64_nxv2i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i64> @llvm.riscv.vasub.nxv2i64.nxv2i64(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64> %1,
+    i64 %2)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vasub.mask.nxv2i64.nxv2i64(
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i64> @intrinsic_vasub_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv2i64_nxv2i64_nxv2i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i64> @llvm.riscv.vasub.mask.nxv2i64.nxv2i64(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64> %1,
+    <vscale x 2 x i64> %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vasub.nxv4i64.nxv4i64(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  i64);
+
+define <vscale x 4 x i64> @intrinsic_vasub_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_vv_nxv4i64_nxv4i64_nxv4i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i64> @llvm.riscv.vasub.nxv4i64.nxv4i64(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64> %1,
+    i64 %2)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vasub.mask.nxv4i64.nxv4i64(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i64> @intrinsic_vasub_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv4i64_nxv4i64_nxv4i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i64> @llvm.riscv.vasub.mask.nxv4i64.nxv4i64(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64> %1,
+    <vscale x 4 x i64> %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vasub.nxv8i64.nxv8i64(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>,
+  i64);
+
+define <vscale x 8 x i64> @intrinsic_vasub_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_vv_nxv8i64_nxv8i64_nxv8i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i64> @llvm.riscv.vasub.nxv8i64.nxv8i64(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i64> %1,
+    i64 %2)
+
+  ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vasub.mask.nxv8i64.nxv8i64(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i64> @intrinsic_vasub_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv8i64_nxv8i64_nxv8i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vasub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i64> @llvm.riscv.vasub.mask.nxv8i64.nxv8i64(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i64> %1,
+    <vscale x 8 x i64> %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vasub.nxv1i8.i8(
+  <vscale x 1 x i8>,
+  i8,
+  i64);
+
+define <vscale x 1 x i8> @intrinsic_vasub_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_vx_nxv1i8_nxv1i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 1 x i8> @llvm.riscv.vasub.nxv1i8.i8(
+    <vscale x 1 x i8> %0,
+    i8 %1,
+    i64 %2)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vasub.mask.nxv1i8.i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  i8,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i8> @intrinsic_vasub_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv1i8_nxv1i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i8> @llvm.riscv.vasub.mask.nxv1i8.i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    i8 %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vasub.nxv2i8.i8(
+  <vscale x 2 x i8>,
+  i8,
+  i64);
+
+define <vscale x 2 x i8> @intrinsic_vasub_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_vx_nxv2i8_nxv2i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 2 x i8> @llvm.riscv.vasub.nxv2i8.i8(
+    <vscale x 2 x i8> %0,
+    i8 %1,
+    i64 %2)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vasub.mask.nxv2i8.i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  i8,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i8> @intrinsic_vasub_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv2i8_nxv2i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i8> @llvm.riscv.vasub.mask.nxv2i8.i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    i8 %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vasub.nxv4i8.i8(
+  <vscale x 4 x i8>,
+  i8,
+  i64);
+
+define <vscale x 4 x i8> @intrinsic_vasub_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_vx_nxv4i8_nxv4i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 4 x i8> @llvm.riscv.vasub.nxv4i8.i8(
+    <vscale x 4 x i8> %0,
+    i8 %1,
+    i64 %2)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vasub.mask.nxv4i8.i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  i8,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i8> @intrinsic_vasub_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv4i8_nxv4i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i8> @llvm.riscv.vasub.mask.nxv4i8.i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    i8 %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vasub.nxv8i8.i8(
+  <vscale x 8 x i8>,
+  i8,
+  i64);
+
+define <vscale x 8 x i8> @intrinsic_vasub_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_vx_nxv8i8_nxv8i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 8 x i8> @llvm.riscv.vasub.nxv8i8.i8(
+    <vscale x 8 x i8> %0,
+    i8 %1,
+    i64 %2)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vasub.mask.nxv8i8.i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  i8,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i8> @intrinsic_vasub_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv8i8_nxv8i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i8> @llvm.riscv.vasub.mask.nxv8i8.i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    i8 %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vasub.nxv16i8.i8(
+  <vscale x 16 x i8>,
+  i8,
+  i64);
+
+define <vscale x 16 x i8> @intrinsic_vasub_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_vx_nxv16i8_nxv16i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 16 x i8> @llvm.riscv.vasub.nxv16i8.i8(
+    <vscale x 16 x i8> %0,
+    i8 %1,
+    i64 %2)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vasub.mask.nxv16i8.i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  i8,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i8> @intrinsic_vasub_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv16i8_nxv16i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i8> @llvm.riscv.vasub.mask.nxv16i8.i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    i8 %2,
+    <vscale x 16 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vasub.nxv32i8.i8(
+  <vscale x 32 x i8>,
+  i8,
+  i64);
+
+define <vscale x 32 x i8> @intrinsic_vasub_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_vx_nxv32i8_nxv32i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 32 x i8> @llvm.riscv.vasub.nxv32i8.i8(
+    <vscale x 32 x i8> %0,
+    i8 %1,
+    i64 %2)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vasub.mask.nxv32i8.i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  i8,
+  <vscale x 32 x i1>,
+  i64);
+
+define <vscale x 32 x i8> @intrinsic_vasub_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv32i8_nxv32i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 32 x i8> @llvm.riscv.vasub.mask.nxv32i8.i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8> %1,
+    i8 %2,
+    <vscale x 32 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vasub.nxv64i8.i8(
+  <vscale x 64 x i8>,
+  i8,
+  i64);
+
+define <vscale x 64 x i8> @intrinsic_vasub_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_vx_nxv64i8_nxv64i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 64 x i8> @llvm.riscv.vasub.nxv64i8.i8(
+    <vscale x 64 x i8> %0,
+    i8 %1,
+    i64 %2)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vasub.mask.nxv64i8.i8(
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  i8,
+  <vscale x 64 x i1>,
+  i64);
+
+define <vscale x 64 x i8> @intrinsic_vasub_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv64i8_nxv64i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 64 x i8> @llvm.riscv.vasub.mask.nxv64i8.i8(
+    <vscale x 64 x i8> %0,
+    <vscale x 64 x i8> %1,
+    i8 %2,
+    <vscale x 64 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vasub.nxv1i16.i16(
+  <vscale x 1 x i16>,
+  i16,
+  i64);
+
+define <vscale x 1 x i16> @intrinsic_vasub_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_vx_nxv1i16_nxv1i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 1 x i16> @llvm.riscv.vasub.nxv1i16.i16(
+    <vscale x 1 x i16> %0,
+    i16 %1,
+    i64 %2)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vasub.mask.nxv1i16.i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  i16,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i16> @intrinsic_vasub_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv1i16_nxv1i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i16> @llvm.riscv.vasub.mask.nxv1i16.i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    i16 %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vasub.nxv2i16.i16(
+  <vscale x 2 x i16>,
+  i16,
+  i64);
+
+define <vscale x 2 x i16> @intrinsic_vasub_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_vx_nxv2i16_nxv2i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 2 x i16> @llvm.riscv.vasub.nxv2i16.i16(
+    <vscale x 2 x i16> %0,
+    i16 %1,
+    i64 %2)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vasub.mask.nxv2i16.i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  i16,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i16> @intrinsic_vasub_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv2i16_nxv2i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i16> @llvm.riscv.vasub.mask.nxv2i16.i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %1,
+    i16 %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vasub.nxv4i16.i16(
+  <vscale x 4 x i16>,
+  i16,
+  i64);
+
+define <vscale x 4 x i16> @intrinsic_vasub_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_vx_nxv4i16_nxv4i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 4 x i16> @llvm.riscv.vasub.nxv4i16.i16(
+    <vscale x 4 x i16> %0,
+    i16 %1,
+    i64 %2)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vasub.mask.nxv4i16.i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  i16,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i16> @intrinsic_vasub_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv4i16_nxv4i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i16> @llvm.riscv.vasub.mask.nxv4i16.i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %1,
+    i16 %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vasub.nxv8i16.i16(
+  <vscale x 8 x i16>,
+  i16,
+  i64);
+
+define <vscale x 8 x i16> @intrinsic_vasub_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_vx_nxv8i16_nxv8i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 8 x i16> @llvm.riscv.vasub.nxv8i16.i16(
+    <vscale x 8 x i16> %0,
+    i16 %1,
+    i64 %2)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vasub.mask.nxv8i16.i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  i16,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i16> @intrinsic_vasub_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv8i16_nxv8i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i16> @llvm.riscv.vasub.mask.nxv8i16.i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %1,
+    i16 %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vasub.nxv16i16.i16(
+  <vscale x 16 x i16>,
+  i16,
+  i64);
+
+define <vscale x 16 x i16> @intrinsic_vasub_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_vx_nxv16i16_nxv16i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 16 x i16> @llvm.riscv.vasub.nxv16i16.i16(
+    <vscale x 16 x i16> %0,
+    i16 %1,
+    i64 %2)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vasub.mask.nxv16i16.i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  i16,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i16> @intrinsic_vasub_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv16i16_nxv16i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i16> @llvm.riscv.vasub.mask.nxv16i16.i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %1,
+    i16 %2,
+    <vscale x 16 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vasub.nxv32i16.i16(
+  <vscale x 32 x i16>,
+  i16,
+  i64);
+
+define <vscale x 32 x i16> @intrinsic_vasub_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_vx_nxv32i16_nxv32i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 32 x i16> @llvm.riscv.vasub.nxv32i16.i16(
+    <vscale x 32 x i16> %0,
+    i16 %1,
+    i64 %2)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vasub.mask.nxv32i16.i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  i16,
+  <vscale x 32 x i1>,
+  i64);
+
+define <vscale x 32 x i16> @intrinsic_vasub_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv32i16_nxv32i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 32 x i16> @llvm.riscv.vasub.mask.nxv32i16.i16(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16> %1,
+    i16 %2,
+    <vscale x 32 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vasub.nxv1i32.i32(
+  <vscale x 1 x i32>,
+  i32,
+  i64);
+
+define <vscale x 1 x i32> @intrinsic_vasub_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_vx_nxv1i32_nxv1i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 1 x i32> @llvm.riscv.vasub.nxv1i32.i32(
+    <vscale x 1 x i32> %0,
+    i32 %1,
+    i64 %2)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vasub.mask.nxv1i32.i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  i32,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i32> @intrinsic_vasub_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv1i32_nxv1i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i32> @llvm.riscv.vasub.mask.nxv1i32.i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    i32 %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vasub.nxv2i32.i32(
+  <vscale x 2 x i32>,
+  i32,
+  i64);
+
+define <vscale x 2 x i32> @intrinsic_vasub_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_vx_nxv2i32_nxv2i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 2 x i32> @llvm.riscv.vasub.nxv2i32.i32(
+    <vscale x 2 x i32> %0,
+    i32 %1,
+    i64 %2)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vasub.mask.nxv2i32.i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  i32,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i32> @intrinsic_vasub_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv2i32_nxv2i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i32> @llvm.riscv.vasub.mask.nxv2i32.i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    i32 %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vasub.nxv4i32.i32(
+  <vscale x 4 x i32>,
+  i32,
+  i64);
+
+define <vscale x 4 x i32> @intrinsic_vasub_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_vx_nxv4i32_nxv4i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 4 x i32> @llvm.riscv.vasub.nxv4i32.i32(
+    <vscale x 4 x i32> %0,
+    i32 %1,
+    i64 %2)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vasub.mask.nxv4i32.i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  i32,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i32> @intrinsic_vasub_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv4i32_nxv4i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i32> @llvm.riscv.vasub.mask.nxv4i32.i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    i32 %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vasub.nxv8i32.i32(
+  <vscale x 8 x i32>,
+  i32,
+  i64);
+
+define <vscale x 8 x i32> @intrinsic_vasub_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_vx_nxv8i32_nxv8i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 8 x i32> @llvm.riscv.vasub.nxv8i32.i32(
+    <vscale x 8 x i32> %0,
+    i32 %1,
+    i64 %2)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vasub.mask.nxv8i32.i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  i32,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i32> @intrinsic_vasub_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv8i32_nxv8i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i32> @llvm.riscv.vasub.mask.nxv8i32.i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    i32 %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vasub.nxv16i32.i32(
+  <vscale x 16 x i32>,
+  i32,
+  i64);
+
+define <vscale x 16 x i32> @intrinsic_vasub_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_vx_nxv16i32_nxv16i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 16 x i32> @llvm.riscv.vasub.nxv16i32.i32(
+    <vscale x 16 x i32> %0,
+    i32 %1,
+    i64 %2)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vasub.mask.nxv16i32.i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  i32,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i32> @intrinsic_vasub_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv16i32_nxv16i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i32> @llvm.riscv.vasub.mask.nxv16i32.i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    i32 %2,
+    <vscale x 16 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vasub.nxv1i64.i64(
+  <vscale x 1 x i64>,
+  i64,
+  i64);
+
+define <vscale x 1 x i64> @intrinsic_vasub_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_vx_nxv1i64_nxv1i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 1 x i64> @llvm.riscv.vasub.nxv1i64.i64(
+    <vscale x 1 x i64> %0,
+    i64 %1,
+    i64 %2)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vasub.mask.nxv1i64.i64(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>,
+  i64,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i64> @intrinsic_vasub_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv1i64_nxv1i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i64> @llvm.riscv.vasub.mask.nxv1i64.i64(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64> %1,
+    i64 %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vasub.nxv2i64.i64(
+  <vscale x 2 x i64>,
+  i64,
+  i64);
+
+define <vscale x 2 x i64> @intrinsic_vasub_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_vx_nxv2i64_nxv2i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 2 x i64> @llvm.riscv.vasub.nxv2i64.i64(
+    <vscale x 2 x i64> %0,
+    i64 %1,
+    i64 %2)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vasub.mask.nxv2i64.i64(
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>,
+  i64,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i64> @intrinsic_vasub_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv2i64_nxv2i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i64> @llvm.riscv.vasub.mask.nxv2i64.i64(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64> %1,
+    i64 %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vasub.nxv4i64.i64(
+  <vscale x 4 x i64>,
+  i64,
+  i64);
+
+define <vscale x 4 x i64> @intrinsic_vasub_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_vx_nxv4i64_nxv4i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 4 x i64> @llvm.riscv.vasub.nxv4i64.i64(
+    <vscale x 4 x i64> %0,
+    i64 %1,
+    i64 %2)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vasub.mask.nxv4i64.i64(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  i64,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i64> @intrinsic_vasub_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv4i64_nxv4i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i64> @llvm.riscv.vasub.mask.nxv4i64.i64(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64> %1,
+    i64 %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vasub.nxv8i64.i64(
+  <vscale x 8 x i64>,
+  i64,
+  i64);
+
+define <vscale x 8 x i64> @intrinsic_vasub_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_vx_nxv8i64_nxv8i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 8 x i64> @llvm.riscv.vasub.nxv8i64.i64(
+    <vscale x 8 x i64> %0,
+    i64 %1,
+    i64 %2)
+
+  ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vasub.mask.nxv8i64.i64(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>,
+  i64,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i64> @intrinsic_vasub_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv8i64_nxv8i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vasub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i64> @llvm.riscv.vasub.mask.nxv8i64.i64(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i64> %1,
+    i64 %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i64> %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vasubu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vasubu-rv32.ll
new file mode 100644
index 000000000000..521bba6c94e9
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vasubu-rv32.ll
@@ -0,0 +1,1441 @@
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i8> @llvm.riscv.vasubu.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  i32);
+
+define <vscale x 1 x i8> @intrinsic_vasubu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_vv_nxv1i8_nxv1i8_nxv1i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i8> @llvm.riscv.vasubu.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vasubu.mask.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i8> @intrinsic_vasubu_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv1i8_nxv1i8_nxv1i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i8> @llvm.riscv.vasubu.mask.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    <vscale x 1 x i8> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vasubu.nxv2i8.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  i32);
+
+define <vscale x 2 x i8> @intrinsic_vasubu_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_vv_nxv2i8_nxv2i8_nxv2i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i8> @llvm.riscv.vasubu.nxv2i8.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vasubu.mask.nxv2i8.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i8> @intrinsic_vasubu_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv2i8_nxv2i8_nxv2i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i8> @llvm.riscv.vasubu.mask.nxv2i8.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    <vscale x 2 x i8> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vasubu.nxv4i8.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  i32);
+
+define <vscale x 4 x i8> @intrinsic_vasubu_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_vv_nxv4i8_nxv4i8_nxv4i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i8> @llvm.riscv.vasubu.nxv4i8.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vasubu.mask.nxv4i8.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i8> @intrinsic_vasubu_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv4i8_nxv4i8_nxv4i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i8> @llvm.riscv.vasubu.mask.nxv4i8.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    <vscale x 4 x i8> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vasubu.nxv8i8.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  i32);
+
+define <vscale x 8 x i8> @intrinsic_vasubu_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_vv_nxv8i8_nxv8i8_nxv8i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i8> @llvm.riscv.vasubu.nxv8i8.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vasubu.mask.nxv8i8.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i8> @intrinsic_vasubu_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv8i8_nxv8i8_nxv8i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i8> @llvm.riscv.vasubu.mask.nxv8i8.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    <vscale x 8 x i8> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vasubu.nxv16i8.nxv16i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  i32);
+
+define <vscale x 16 x i8> @intrinsic_vasubu_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_vv_nxv16i8_nxv16i8_nxv16i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i8> @llvm.riscv.vasubu.nxv16i8.nxv16i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vasubu.mask.nxv16i8.nxv16i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i8> @intrinsic_vasubu_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv16i8_nxv16i8_nxv16i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i8> @llvm.riscv.vasubu.mask.nxv16i8.nxv16i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    <vscale x 16 x i8> %2,
+    <vscale x 16 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vasubu.nxv32i8.nxv32i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  i32);
+
+define <vscale x 32 x i8> @intrinsic_vasubu_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_vv_nxv32i8_nxv32i8_nxv32i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 32 x i8> @llvm.riscv.vasubu.nxv32i8.nxv32i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vasubu.mask.nxv32i8.nxv32i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  <vscale x 32 x i1>,
+  i32);
+
+define <vscale x 32 x i8> @intrinsic_vasubu_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv32i8_nxv32i8_nxv32i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 32 x i8> @llvm.riscv.vasubu.mask.nxv32i8.nxv32i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8> %1,
+    <vscale x 32 x i8> %2,
+    <vscale x 32 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vasubu.nxv64i8.nxv64i8(
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  i32);
+
+define <vscale x 64 x i8> @intrinsic_vasubu_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_vv_nxv64i8_nxv64i8_nxv64i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 64 x i8> @llvm.riscv.vasubu.nxv64i8.nxv64i8(
+    <vscale x 64 x i8> %0,
+    <vscale x 64 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vasubu.mask.nxv64i8.nxv64i8(
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  <vscale x 64 x i1>,
+  i32);
+
+define <vscale x 64 x i8> @intrinsic_vasubu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv64i8_nxv64i8_nxv64i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 64 x i8> @llvm.riscv.vasubu.mask.nxv64i8.nxv64i8(
+    <vscale x 64 x i8> %0,
+    <vscale x 64 x i8> %1,
+    <vscale x 64 x i8> %2,
+    <vscale x 64 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vasubu.nxv1i16.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  i32);
+
+define <vscale x 1 x i16> @intrinsic_vasubu_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_vv_nxv1i16_nxv1i16_nxv1i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i16> @llvm.riscv.vasubu.nxv1i16.nxv1i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    i32 %2)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vasubu.mask.nxv1i16.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i16> @intrinsic_vasubu_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv1i16_nxv1i16_nxv1i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i16> @llvm.riscv.vasubu.mask.nxv1i16.nxv1i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    <vscale x 1 x i16> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vasubu.nxv2i16.nxv2i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  i32);
+
+define <vscale x 2 x i16> @intrinsic_vasubu_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_vv_nxv2i16_nxv2i16_nxv2i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i16> @llvm.riscv.vasubu.nxv2i16.nxv2i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %1,
+    i32 %2)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vasubu.mask.nxv2i16.nxv2i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i16> @intrinsic_vasubu_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv2i16_nxv2i16_nxv2i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i16> @llvm.riscv.vasubu.mask.nxv2i16.nxv2i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %1,
+    <vscale x 2 x i16> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vasubu.nxv4i16.nxv4i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  i32);
+
+define <vscale x 4 x i16> @intrinsic_vasubu_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_vv_nxv4i16_nxv4i16_nxv4i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i16> @llvm.riscv.vasubu.nxv4i16.nxv4i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %1,
+    i32 %2)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vasubu.mask.nxv4i16.nxv4i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i16> @intrinsic_vasubu_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv4i16_nxv4i16_nxv4i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i16> @llvm.riscv.vasubu.mask.nxv4i16.nxv4i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %1,
+    <vscale x 4 x i16> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vasubu.nxv8i16.nxv8i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  i32);
+
+define <vscale x 8 x i16> @intrinsic_vasubu_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_vv_nxv8i16_nxv8i16_nxv8i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i16> @llvm.riscv.vasubu.nxv8i16.nxv8i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %1,
+    i32 %2)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vasubu.mask.nxv8i16.nxv8i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i16> @intrinsic_vasubu_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv8i16_nxv8i16_nxv8i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i16> @llvm.riscv.vasubu.mask.nxv8i16.nxv8i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %1,
+    <vscale x 8 x i16> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vasubu.nxv16i16.nxv16i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  i32);
+
+define <vscale x 16 x i16> @intrinsic_vasubu_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_vv_nxv16i16_nxv16i16_nxv16i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i16> @llvm.riscv.vasubu.nxv16i16.nxv16i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %1,
+    i32 %2)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vasubu.mask.nxv16i16.nxv16i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i16> @intrinsic_vasubu_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv16i16_nxv16i16_nxv16i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i16> @llvm.riscv.vasubu.mask.nxv16i16.nxv16i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %1,
+    <vscale x 16 x i16> %2,
+    <vscale x 16 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vasubu.nxv32i16.nxv32i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  i32);
+
+define <vscale x 32 x i16> @intrinsic_vasubu_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_vv_nxv32i16_nxv32i16_nxv32i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 32 x i16> @llvm.riscv.vasubu.nxv32i16.nxv32i16(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16> %1,
+    i32 %2)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vasubu.mask.nxv32i16.nxv32i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  <vscale x 32 x i1>,
+  i32);
+
+define <vscale x 32 x i16> @intrinsic_vasubu_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv32i16_nxv32i16_nxv32i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 32 x i16> @llvm.riscv.vasubu.mask.nxv32i16.nxv32i16(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16> %1,
+    <vscale x 32 x i16> %2,
+    <vscale x 32 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vasubu.nxv1i32.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  i32);
+
+define <vscale x 1 x i32> @intrinsic_vasubu_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_vv_nxv1i32_nxv1i32_nxv1i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i32> @llvm.riscv.vasubu.nxv1i32.nxv1i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    i32 %2)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vasubu.mask.nxv1i32.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i32> @intrinsic_vasubu_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv1i32_nxv1i32_nxv1i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i32> @llvm.riscv.vasubu.mask.nxv1i32.nxv1i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    <vscale x 1 x i32> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vasubu.nxv2i32.nxv2i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  i32);
+
+define <vscale x 2 x i32> @intrinsic_vasubu_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_vv_nxv2i32_nxv2i32_nxv2i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i32> @llvm.riscv.vasubu.nxv2i32.nxv2i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    i32 %2)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vasubu.mask.nxv2i32.nxv2i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i32> @intrinsic_vasubu_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv2i32_nxv2i32_nxv2i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i32> @llvm.riscv.vasubu.mask.nxv2i32.nxv2i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    <vscale x 2 x i32> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vasubu.nxv4i32.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  i32);
+
+define <vscale x 4 x i32> @intrinsic_vasubu_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_vv_nxv4i32_nxv4i32_nxv4i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i32> @llvm.riscv.vasubu.nxv4i32.nxv4i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    i32 %2)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vasubu.mask.nxv4i32.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i32> @intrinsic_vasubu_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv4i32_nxv4i32_nxv4i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i32> @llvm.riscv.vasubu.mask.nxv4i32.nxv4i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    <vscale x 4 x i32> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vasubu.nxv8i32.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  i32);
+
+define <vscale x 8 x i32> @intrinsic_vasubu_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_vv_nxv8i32_nxv8i32_nxv8i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i32> @llvm.riscv.vasubu.nxv8i32.nxv8i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    i32 %2)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vasubu.mask.nxv8i32.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i32> @intrinsic_vasubu_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv8i32_nxv8i32_nxv8i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i32> @llvm.riscv.vasubu.mask.nxv8i32.nxv8i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    <vscale x 8 x i32> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vasubu.nxv16i32.nxv16i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  i32);
+
+define <vscale x 16 x i32> @intrinsic_vasubu_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_vv_nxv16i32_nxv16i32_nxv16i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i32> @llvm.riscv.vasubu.nxv16i32.nxv16i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    i32 %2)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vasubu.mask.nxv16i32.nxv16i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i32> @intrinsic_vasubu_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv16i32_nxv16i32_nxv16i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i32> @llvm.riscv.vasubu.mask.nxv16i32.nxv16i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    <vscale x 16 x i32> %2,
+    <vscale x 16 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vasubu.nxv1i8.i8(
+  <vscale x 1 x i8>,
+  i8,
+  i32);
+
+define <vscale x 1 x i8> @intrinsic_vasubu_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_vx_nxv1i8_nxv1i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 1 x i8> @llvm.riscv.vasubu.nxv1i8.i8(
+    <vscale x 1 x i8> %0,
+    i8 %1,
+    i32 %2)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vasubu.mask.nxv1i8.i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  i8,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i8> @intrinsic_vasubu_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv1i8_nxv1i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i8> @llvm.riscv.vasubu.mask.nxv1i8.i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    i8 %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vasubu.nxv2i8.i8(
+  <vscale x 2 x i8>,
+  i8,
+  i32);
+
+define <vscale x 2 x i8> @intrinsic_vasubu_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_vx_nxv2i8_nxv2i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 2 x i8> @llvm.riscv.vasubu.nxv2i8.i8(
+    <vscale x 2 x i8> %0,
+    i8 %1,
+    i32 %2)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vasubu.mask.nxv2i8.i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  i8,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i8> @intrinsic_vasubu_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv2i8_nxv2i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i8> @llvm.riscv.vasubu.mask.nxv2i8.i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    i8 %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vasubu.nxv4i8.i8(
+  <vscale x 4 x i8>,
+  i8,
+  i32);
+
+define <vscale x 4 x i8> @intrinsic_vasubu_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_vx_nxv4i8_nxv4i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 4 x i8> @llvm.riscv.vasubu.nxv4i8.i8(
+    <vscale x 4 x i8> %0,
+    i8 %1,
+    i32 %2)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vasubu.mask.nxv4i8.i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  i8,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i8> @intrinsic_vasubu_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv4i8_nxv4i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i8> @llvm.riscv.vasubu.mask.nxv4i8.i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    i8 %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vasubu.nxv8i8.i8(
+  <vscale x 8 x i8>,
+  i8,
+  i32);
+
+define <vscale x 8 x i8> @intrinsic_vasubu_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_vx_nxv8i8_nxv8i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 8 x i8> @llvm.riscv.vasubu.nxv8i8.i8(
+    <vscale x 8 x i8> %0,
+    i8 %1,
+    i32 %2)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vasubu.mask.nxv8i8.i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  i8,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i8> @intrinsic_vasubu_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv8i8_nxv8i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i8> @llvm.riscv.vasubu.mask.nxv8i8.i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    i8 %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vasubu.nxv16i8.i8(
+  <vscale x 16 x i8>,
+  i8,
+  i32);
+
+define <vscale x 16 x i8> @intrinsic_vasubu_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_vx_nxv16i8_nxv16i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 16 x i8> @llvm.riscv.vasubu.nxv16i8.i8(
+    <vscale x 16 x i8> %0,
+    i8 %1,
+    i32 %2)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vasubu.mask.nxv16i8.i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  i8,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i8> @intrinsic_vasubu_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv16i8_nxv16i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i8> @llvm.riscv.vasubu.mask.nxv16i8.i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    i8 %2,
+    <vscale x 16 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vasubu.nxv32i8.i8(
+  <vscale x 32 x i8>,
+  i8,
+  i32);
+
+define <vscale x 32 x i8> @intrinsic_vasubu_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_vx_nxv32i8_nxv32i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 32 x i8> @llvm.riscv.vasubu.nxv32i8.i8(
+    <vscale x 32 x i8> %0,
+    i8 %1,
+    i32 %2)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vasubu.mask.nxv32i8.i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  i8,
+  <vscale x 32 x i1>,
+  i32);
+
+define <vscale x 32 x i8> @intrinsic_vasubu_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv32i8_nxv32i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 32 x i8> @llvm.riscv.vasubu.mask.nxv32i8.i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8> %1,
+    i8 %2,
+    <vscale x 32 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vasubu.nxv64i8.i8(
+  <vscale x 64 x i8>,
+  i8,
+  i32);
+
+define <vscale x 64 x i8> @intrinsic_vasubu_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_vx_nxv64i8_nxv64i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 64 x i8> @llvm.riscv.vasubu.nxv64i8.i8(
+    <vscale x 64 x i8> %0,
+    i8 %1,
+    i32 %2)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vasubu.mask.nxv64i8.i8(
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  i8,
+  <vscale x 64 x i1>,
+  i32);
+
+define <vscale x 64 x i8> @intrinsic_vasubu_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv64i8_nxv64i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 64 x i8> @llvm.riscv.vasubu.mask.nxv64i8.i8(
+    <vscale x 64 x i8> %0,
+    <vscale x 64 x i8> %1,
+    i8 %2,
+    <vscale x 64 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vasubu.nxv1i16.i16(
+  <vscale x 1 x i16>,
+  i16,
+  i32);
+
+define <vscale x 1 x i16> @intrinsic_vasubu_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_vx_nxv1i16_nxv1i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 1 x i16> @llvm.riscv.vasubu.nxv1i16.i16(
+    <vscale x 1 x i16> %0,
+    i16 %1,
+    i32 %2)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vasubu.mask.nxv1i16.i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  i16,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i16> @intrinsic_vasubu_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv1i16_nxv1i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i16> @llvm.riscv.vasubu.mask.nxv1i16.i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    i16 %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vasubu.nxv2i16.i16(
+  <vscale x 2 x i16>,
+  i16,
+  i32);
+
+define <vscale x 2 x i16> @intrinsic_vasubu_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_vx_nxv2i16_nxv2i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 2 x i16> @llvm.riscv.vasubu.nxv2i16.i16(
+    <vscale x 2 x i16> %0,
+    i16 %1,
+    i32 %2)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vasubu.mask.nxv2i16.i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  i16,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i16> @intrinsic_vasubu_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv2i16_nxv2i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i16> @llvm.riscv.vasubu.mask.nxv2i16.i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %1,
+    i16 %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vasubu.nxv4i16.i16(
+  <vscale x 4 x i16>,
+  i16,
+  i32);
+
+define <vscale x 4 x i16> @intrinsic_vasubu_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_vx_nxv4i16_nxv4i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 4 x i16> @llvm.riscv.vasubu.nxv4i16.i16(
+    <vscale x 4 x i16> %0,
+    i16 %1,
+    i32 %2)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vasubu.mask.nxv4i16.i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  i16,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i16> @intrinsic_vasubu_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv4i16_nxv4i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i16> @llvm.riscv.vasubu.mask.nxv4i16.i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %1,
+    i16 %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vasubu.nxv8i16.i16(
+  <vscale x 8 x i16>,
+  i16,
+  i32);
+
+define <vscale x 8 x i16> @intrinsic_vasubu_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_vx_nxv8i16_nxv8i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 8 x i16> @llvm.riscv.vasubu.nxv8i16.i16(
+    <vscale x 8 x i16> %0,
+    i16 %1,
+    i32 %2)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vasubu.mask.nxv8i16.i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  i16,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i16> @intrinsic_vasubu_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv8i16_nxv8i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i16> @llvm.riscv.vasubu.mask.nxv8i16.i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %1,
+    i16 %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vasubu.nxv16i16.i16(
+  <vscale x 16 x i16>,
+  i16,
+  i32);
+
+define <vscale x 16 x i16> @intrinsic_vasubu_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_vx_nxv16i16_nxv16i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 16 x i16> @llvm.riscv.vasubu.nxv16i16.i16(
+    <vscale x 16 x i16> %0,
+    i16 %1,
+    i32 %2)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vasubu.mask.nxv16i16.i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  i16,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i16> @intrinsic_vasubu_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv16i16_nxv16i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i16> @llvm.riscv.vasubu.mask.nxv16i16.i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %1,
+    i16 %2,
+    <vscale x 16 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vasubu.nxv32i16.i16(
+  <vscale x 32 x i16>,
+  i16,
+  i32);
+
+define <vscale x 32 x i16> @intrinsic_vasubu_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_vx_nxv32i16_nxv32i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 32 x i16> @llvm.riscv.vasubu.nxv32i16.i16(
+    <vscale x 32 x i16> %0,
+    i16 %1,
+    i32 %2)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vasubu.mask.nxv32i16.i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  i16,
+  <vscale x 32 x i1>,
+  i32);
+
+define <vscale x 32 x i16> @intrinsic_vasubu_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv32i16_nxv32i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 32 x i16> @llvm.riscv.vasubu.mask.nxv32i16.i16(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16> %1,
+    i16 %2,
+    <vscale x 32 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vasubu.nxv1i32.i32(
+  <vscale x 1 x i32>,
+  i32,
+  i32);
+
+define <vscale x 1 x i32> @intrinsic_vasubu_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_vx_nxv1i32_nxv1i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 1 x i32> @llvm.riscv.vasubu.nxv1i32.i32(
+    <vscale x 1 x i32> %0,
+    i32 %1,
+    i32 %2)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vasubu.mask.nxv1i32.i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  i32,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i32> @intrinsic_vasubu_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv1i32_nxv1i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i32> @llvm.riscv.vasubu.mask.nxv1i32.i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    i32 %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vasubu.nxv2i32.i32(
+  <vscale x 2 x i32>,
+  i32,
+  i32);
+
+define <vscale x 2 x i32> @intrinsic_vasubu_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_vx_nxv2i32_nxv2i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 2 x i32> @llvm.riscv.vasubu.nxv2i32.i32(
+    <vscale x 2 x i32> %0,
+    i32 %1,
+    i32 %2)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vasubu.mask.nxv2i32.i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  i32,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i32> @intrinsic_vasubu_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv2i32_nxv2i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i32> @llvm.riscv.vasubu.mask.nxv2i32.i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    i32 %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vasubu.nxv4i32.i32(
+  <vscale x 4 x i32>,
+  i32,
+  i32);
+
+define <vscale x 4 x i32> @intrinsic_vasubu_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_vx_nxv4i32_nxv4i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 4 x i32> @llvm.riscv.vasubu.nxv4i32.i32(
+    <vscale x 4 x i32> %0,
+    i32 %1,
+    i32 %2)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vasubu.mask.nxv4i32.i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  i32,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i32> @intrinsic_vasubu_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv4i32_nxv4i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i32> @llvm.riscv.vasubu.mask.nxv4i32.i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    i32 %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vasubu.nxv8i32.i32(
+  <vscale x 8 x i32>,
+  i32,
+  i32);
+
+define <vscale x 8 x i32> @intrinsic_vasubu_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_vx_nxv8i32_nxv8i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 8 x i32> @llvm.riscv.vasubu.nxv8i32.i32(
+    <vscale x 8 x i32> %0,
+    i32 %1,
+    i32 %2)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vasubu.mask.nxv8i32.i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  i32,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i32> @intrinsic_vasubu_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv8i32_nxv8i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i32> @llvm.riscv.vasubu.mask.nxv8i32.i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    i32 %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vasubu.nxv16i32.i32(
+  <vscale x 16 x i32>,
+  i32,
+  i32);
+
+define <vscale x 16 x i32> @intrinsic_vasubu_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_vx_nxv16i32_nxv16i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 16 x i32> @llvm.riscv.vasubu.nxv16i32.i32(
+    <vscale x 16 x i32> %0,
+    i32 %1,
+    i32 %2)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vasubu.mask.nxv16i32.i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  i32,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i32> @intrinsic_vasubu_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv16i32_nxv16i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i32> @llvm.riscv.vasubu.mask.nxv16i32.i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    i32 %2,
+    <vscale x 16 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 16 x i32> %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vasubu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vasubu-rv64.ll
new file mode 100644
index 000000000000..b74b4026b8df
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vasubu-rv64.ll
@@ -0,0 +1,1761 @@
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i8> @llvm.riscv.vasubu.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  i64);
+
+define <vscale x 1 x i8> @intrinsic_vasubu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_vv_nxv1i8_nxv1i8_nxv1i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i8> @llvm.riscv.vasubu.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vasubu.mask.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i8> @intrinsic_vasubu_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv1i8_nxv1i8_nxv1i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i8> @llvm.riscv.vasubu.mask.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    <vscale x 1 x i8> %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vasubu.nxv2i8.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  i64);
+
+define <vscale x 2 x i8> @intrinsic_vasubu_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_vv_nxv2i8_nxv2i8_nxv2i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i8> @llvm.riscv.vasubu.nxv2i8.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vasubu.mask.nxv2i8.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i8> @intrinsic_vasubu_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv2i8_nxv2i8_nxv2i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i8> @llvm.riscv.vasubu.mask.nxv2i8.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    <vscale x 2 x i8> %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vasubu.nxv4i8.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  i64);
+
+define <vscale x 4 x i8> @intrinsic_vasubu_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_vv_nxv4i8_nxv4i8_nxv4i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i8> @llvm.riscv.vasubu.nxv4i8.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vasubu.mask.nxv4i8.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i8> @intrinsic_vasubu_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv4i8_nxv4i8_nxv4i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i8> @llvm.riscv.vasubu.mask.nxv4i8.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    <vscale x 4 x i8> %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vasubu.nxv8i8.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  i64);
+
+define <vscale x 8 x i8> @intrinsic_vasubu_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_vv_nxv8i8_nxv8i8_nxv8i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i8> @llvm.riscv.vasubu.nxv8i8.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vasubu.mask.nxv8i8.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i8> @intrinsic_vasubu_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv8i8_nxv8i8_nxv8i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i8> @llvm.riscv.vasubu.mask.nxv8i8.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    <vscale x 8 x i8> %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vasubu.nxv16i8.nxv16i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  i64);
+
+define <vscale x 16 x i8> @intrinsic_vasubu_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_vv_nxv16i8_nxv16i8_nxv16i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i8> @llvm.riscv.vasubu.nxv16i8.nxv16i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vasubu.mask.nxv16i8.nxv16i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i8> @intrinsic_vasubu_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv16i8_nxv16i8_nxv16i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i8> @llvm.riscv.vasubu.mask.nxv16i8.nxv16i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    <vscale x 16 x i8> %2,
+    <vscale x 16 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vasubu.nxv32i8.nxv32i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  i64);
+
+define <vscale x 32 x i8> @intrinsic_vasubu_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_vv_nxv32i8_nxv32i8_nxv32i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 32 x i8> @llvm.riscv.vasubu.nxv32i8.nxv32i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vasubu.mask.nxv32i8.nxv32i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  <vscale x 32 x i1>,
+  i64);
+
+define <vscale x 32 x i8> @intrinsic_vasubu_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv32i8_nxv32i8_nxv32i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 32 x i8> @llvm.riscv.vasubu.mask.nxv32i8.nxv32i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8> %1,
+    <vscale x 32 x i8> %2,
+    <vscale x 32 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vasubu.nxv64i8.nxv64i8(
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  i64);
+
+define <vscale x 64 x i8> @intrinsic_vasubu_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_vv_nxv64i8_nxv64i8_nxv64i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 64 x i8> @llvm.riscv.vasubu.nxv64i8.nxv64i8(
+    <vscale x 64 x i8> %0,
+    <vscale x 64 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vasubu.mask.nxv64i8.nxv64i8(
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  <vscale x 64 x i1>,
+  i64);
+
+define <vscale x 64 x i8> @intrinsic_vasubu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv64i8_nxv64i8_nxv64i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 64 x i8> @llvm.riscv.vasubu.mask.nxv64i8.nxv64i8(
+    <vscale x 64 x i8> %0,
+    <vscale x 64 x i8> %1,
+    <vscale x 64 x i8> %2,
+    <vscale x 64 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vasubu.nxv1i16.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  i64);
+
+define <vscale x 1 x i16> @intrinsic_vasubu_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_vv_nxv1i16_nxv1i16_nxv1i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i16> @llvm.riscv.vasubu.nxv1i16.nxv1i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    i64 %2)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vasubu.mask.nxv1i16.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i16> @intrinsic_vasubu_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv1i16_nxv1i16_nxv1i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i16> @llvm.riscv.vasubu.mask.nxv1i16.nxv1i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    <vscale x 1 x i16> %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vasubu.nxv2i16.nxv2i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  i64);
+
+define <vscale x 2 x i16> @intrinsic_vasubu_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_vv_nxv2i16_nxv2i16_nxv2i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i16> @llvm.riscv.vasubu.nxv2i16.nxv2i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %1,
+    i64 %2)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vasubu.mask.nxv2i16.nxv2i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i16> @intrinsic_vasubu_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv2i16_nxv2i16_nxv2i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i16> @llvm.riscv.vasubu.mask.nxv2i16.nxv2i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %1,
+    <vscale x 2 x i16> %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vasubu.nxv4i16.nxv4i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  i64);
+
+define <vscale x 4 x i16> @intrinsic_vasubu_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_vv_nxv4i16_nxv4i16_nxv4i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i16> @llvm.riscv.vasubu.nxv4i16.nxv4i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %1,
+    i64 %2)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vasubu.mask.nxv4i16.nxv4i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i16> @intrinsic_vasubu_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv4i16_nxv4i16_nxv4i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i16> @llvm.riscv.vasubu.mask.nxv4i16.nxv4i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %1,
+    <vscale x 4 x i16> %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vasubu.nxv8i16.nxv8i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  i64);
+
+define <vscale x 8 x i16> @intrinsic_vasubu_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_vv_nxv8i16_nxv8i16_nxv8i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i16> @llvm.riscv.vasubu.nxv8i16.nxv8i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %1,
+    i64 %2)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vasubu.mask.nxv8i16.nxv8i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i16> @intrinsic_vasubu_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv8i16_nxv8i16_nxv8i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i16> @llvm.riscv.vasubu.mask.nxv8i16.nxv8i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %1,
+    <vscale x 8 x i16> %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vasubu.nxv16i16.nxv16i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  i64);
+
+define <vscale x 16 x i16> @intrinsic_vasubu_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_vv_nxv16i16_nxv16i16_nxv16i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i16> @llvm.riscv.vasubu.nxv16i16.nxv16i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %1,
+    i64 %2)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vasubu.mask.nxv16i16.nxv16i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i16> @intrinsic_vasubu_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv16i16_nxv16i16_nxv16i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i16> @llvm.riscv.vasubu.mask.nxv16i16.nxv16i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %1,
+    <vscale x 16 x i16> %2,
+    <vscale x 16 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vasubu.nxv32i16.nxv32i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  i64);
+
+define <vscale x 32 x i16> @intrinsic_vasubu_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_vv_nxv32i16_nxv32i16_nxv32i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 32 x i16> @llvm.riscv.vasubu.nxv32i16.nxv32i16(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16> %1,
+    i64 %2)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vasubu.mask.nxv32i16.nxv32i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  <vscale x 32 x i1>,
+  i64);
+
+define <vscale x 32 x i16> @intrinsic_vasubu_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv32i16_nxv32i16_nxv32i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 32 x i16> @llvm.riscv.vasubu.mask.nxv32i16.nxv32i16(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16> %1,
+    <vscale x 32 x i16> %2,
+    <vscale x 32 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vasubu.nxv1i32.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  i64);
+
+define <vscale x 1 x i32> @intrinsic_vasubu_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_vv_nxv1i32_nxv1i32_nxv1i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i32> @llvm.riscv.vasubu.nxv1i32.nxv1i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    i64 %2)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vasubu.mask.nxv1i32.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i32> @intrinsic_vasubu_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv1i32_nxv1i32_nxv1i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i32> @llvm.riscv.vasubu.mask.nxv1i32.nxv1i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    <vscale x 1 x i32> %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vasubu.nxv2i32.nxv2i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  i64);
+
+define <vscale x 2 x i32> @intrinsic_vasubu_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_vv_nxv2i32_nxv2i32_nxv2i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i32> @llvm.riscv.vasubu.nxv2i32.nxv2i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    i64 %2)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vasubu.mask.nxv2i32.nxv2i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i32> @intrinsic_vasubu_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv2i32_nxv2i32_nxv2i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i32> @llvm.riscv.vasubu.mask.nxv2i32.nxv2i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    <vscale x 2 x i32> %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vasubu.nxv4i32.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  i64);
+
+define <vscale x 4 x i32> @intrinsic_vasubu_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_vv_nxv4i32_nxv4i32_nxv4i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i32> @llvm.riscv.vasubu.nxv4i32.nxv4i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    i64 %2)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vasubu.mask.nxv4i32.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i32> @intrinsic_vasubu_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv4i32_nxv4i32_nxv4i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i32> @llvm.riscv.vasubu.mask.nxv4i32.nxv4i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    <vscale x 4 x i32> %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vasubu.nxv8i32.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  i64);
+
+define <vscale x 8 x i32> @intrinsic_vasubu_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_vv_nxv8i32_nxv8i32_nxv8i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i32> @llvm.riscv.vasubu.nxv8i32.nxv8i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    i64 %2)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vasubu.mask.nxv8i32.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i32> @intrinsic_vasubu_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv8i32_nxv8i32_nxv8i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i32> @llvm.riscv.vasubu.mask.nxv8i32.nxv8i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    <vscale x 8 x i32> %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vasubu.nxv16i32.nxv16i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  i64);
+
+define <vscale x 16 x i32> @intrinsic_vasubu_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_vv_nxv16i32_nxv16i32_nxv16i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i32> @llvm.riscv.vasubu.nxv16i32.nxv16i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    i64 %2)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vasubu.mask.nxv16i32.nxv16i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i32> @intrinsic_vasubu_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv16i32_nxv16i32_nxv16i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i32> @llvm.riscv.vasubu.mask.nxv16i32.nxv16i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    <vscale x 16 x i32> %2,
+    <vscale x 16 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vasubu.nxv1i64.nxv1i64(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>,
+  i64);
+
+define <vscale x 1 x i64> @intrinsic_vasubu_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_vv_nxv1i64_nxv1i64_nxv1i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i64> @llvm.riscv.vasubu.nxv1i64.nxv1i64(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64> %1,
+    i64 %2)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vasubu.mask.nxv1i64.nxv1i64(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i64> @intrinsic_vasubu_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv1i64_nxv1i64_nxv1i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i64> @llvm.riscv.vasubu.mask.nxv1i64.nxv1i64(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64> %1,
+    <vscale x 1 x i64> %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vasubu.nxv2i64.nxv2i64(
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>,
+  i64);
+
+define <vscale x 2 x i64> @intrinsic_vasubu_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_vv_nxv2i64_nxv2i64_nxv2i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i64> @llvm.riscv.vasubu.nxv2i64.nxv2i64(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64> %1,
+    i64 %2)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vasubu.mask.nxv2i64.nxv2i64(
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i64> @intrinsic_vasubu_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv2i64_nxv2i64_nxv2i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i64> @llvm.riscv.vasubu.mask.nxv2i64.nxv2i64(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64> %1,
+    <vscale x 2 x i64> %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vasubu.nxv4i64.nxv4i64(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  i64);
+
+define <vscale x 4 x i64> @intrinsic_vasubu_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_vv_nxv4i64_nxv4i64_nxv4i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i64> @llvm.riscv.vasubu.nxv4i64.nxv4i64(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64> %1,
+    i64 %2)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vasubu.mask.nxv4i64.nxv4i64(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i64> @intrinsic_vasubu_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv4i64_nxv4i64_nxv4i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i64> @llvm.riscv.vasubu.mask.nxv4i64.nxv4i64(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64> %1,
+    <vscale x 4 x i64> %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vasubu.nxv8i64.nxv8i64(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>,
+  i64);
+
+define <vscale x 8 x i64> @intrinsic_vasubu_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_vv_nxv8i64_nxv8i64_nxv8i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i64> @llvm.riscv.vasubu.nxv8i64.nxv8i64(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i64> %1,
+    i64 %2)
+
+  ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vasubu.mask.nxv8i64.nxv8i64(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i64> @intrinsic_vasubu_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv8i64_nxv8i64_nxv8i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vasubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i64> @llvm.riscv.vasubu.mask.nxv8i64.nxv8i64(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i64> %1,
+    <vscale x 8 x i64> %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vasubu.nxv1i8.i8(
+  <vscale x 1 x i8>,
+  i8,
+  i64);
+
+define <vscale x 1 x i8> @intrinsic_vasubu_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_vx_nxv1i8_nxv1i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 1 x i8> @llvm.riscv.vasubu.nxv1i8.i8(
+    <vscale x 1 x i8> %0,
+    i8 %1,
+    i64 %2)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vasubu.mask.nxv1i8.i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  i8,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i8> @intrinsic_vasubu_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv1i8_nxv1i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i8> @llvm.riscv.vasubu.mask.nxv1i8.i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    i8 %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vasubu.nxv2i8.i8(
+  <vscale x 2 x i8>,
+  i8,
+  i64);
+
+define <vscale x 2 x i8> @intrinsic_vasubu_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_vx_nxv2i8_nxv2i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 2 x i8> @llvm.riscv.vasubu.nxv2i8.i8(
+    <vscale x 2 x i8> %0,
+    i8 %1,
+    i64 %2)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vasubu.mask.nxv2i8.i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  i8,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i8> @intrinsic_vasubu_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv2i8_nxv2i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i8> @llvm.riscv.vasubu.mask.nxv2i8.i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    i8 %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vasubu.nxv4i8.i8(
+  <vscale x 4 x i8>,
+  i8,
+  i64);
+
+define <vscale x 4 x i8> @intrinsic_vasubu_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_vx_nxv4i8_nxv4i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 4 x i8> @llvm.riscv.vasubu.nxv4i8.i8(
+    <vscale x 4 x i8> %0,
+    i8 %1,
+    i64 %2)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vasubu.mask.nxv4i8.i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  i8,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i8> @intrinsic_vasubu_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv4i8_nxv4i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i8> @llvm.riscv.vasubu.mask.nxv4i8.i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    i8 %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vasubu.nxv8i8.i8(
+  <vscale x 8 x i8>,
+  i8,
+  i64);
+
+define <vscale x 8 x i8> @intrinsic_vasubu_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_vx_nxv8i8_nxv8i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 8 x i8> @llvm.riscv.vasubu.nxv8i8.i8(
+    <vscale x 8 x i8> %0,
+    i8 %1,
+    i64 %2)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vasubu.mask.nxv8i8.i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  i8,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i8> @intrinsic_vasubu_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv8i8_nxv8i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i8> @llvm.riscv.vasubu.mask.nxv8i8.i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    i8 %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vasubu.nxv16i8.i8(
+  <vscale x 16 x i8>,
+  i8,
+  i64);
+
+define <vscale x 16 x i8> @intrinsic_vasubu_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_vx_nxv16i8_nxv16i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 16 x i8> @llvm.riscv.vasubu.nxv16i8.i8(
+    <vscale x 16 x i8> %0,
+    i8 %1,
+    i64 %2)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vasubu.mask.nxv16i8.i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  i8,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i8> @intrinsic_vasubu_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv16i8_nxv16i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i8> @llvm.riscv.vasubu.mask.nxv16i8.i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    i8 %2,
+    <vscale x 16 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vasubu.nxv32i8.i8(
+  <vscale x 32 x i8>,
+  i8,
+  i64);
+
+define <vscale x 32 x i8> @intrinsic_vasubu_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_vx_nxv32i8_nxv32i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 32 x i8> @llvm.riscv.vasubu.nxv32i8.i8(
+    <vscale x 32 x i8> %0,
+    i8 %1,
+    i64 %2)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vasubu.mask.nxv32i8.i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  i8,
+  <vscale x 32 x i1>,
+  i64);
+
+define <vscale x 32 x i8> @intrinsic_vasubu_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv32i8_nxv32i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 32 x i8> @llvm.riscv.vasubu.mask.nxv32i8.i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8> %1,
+    i8 %2,
+    <vscale x 32 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vasubu.nxv64i8.i8(
+  <vscale x 64 x i8>,
+  i8,
+  i64);
+
+define <vscale x 64 x i8> @intrinsic_vasubu_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_vx_nxv64i8_nxv64i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 64 x i8> @llvm.riscv.vasubu.nxv64i8.i8(
+    <vscale x 64 x i8> %0,
+    i8 %1,
+    i64 %2)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vasubu.mask.nxv64i8.i8(
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  i8,
+  <vscale x 64 x i1>,
+  i64);
+
+define <vscale x 64 x i8> @intrinsic_vasubu_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv64i8_nxv64i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 64 x i8> @llvm.riscv.vasubu.mask.nxv64i8.i8(
+    <vscale x 64 x i8> %0,
+    <vscale x 64 x i8> %1,
+    i8 %2,
+    <vscale x 64 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vasubu.nxv1i16.i16(
+  <vscale x 1 x i16>,
+  i16,
+  i64);
+
+define <vscale x 1 x i16> @intrinsic_vasubu_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_vx_nxv1i16_nxv1i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 1 x i16> @llvm.riscv.vasubu.nxv1i16.i16(
+    <vscale x 1 x i16> %0,
+    i16 %1,
+    i64 %2)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vasubu.mask.nxv1i16.i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  i16,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i16> @intrinsic_vasubu_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv1i16_nxv1i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i16> @llvm.riscv.vasubu.mask.nxv1i16.i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    i16 %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vasubu.nxv2i16.i16(
+  <vscale x 2 x i16>,
+  i16,
+  i64);
+
+define <vscale x 2 x i16> @intrinsic_vasubu_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_vx_nxv2i16_nxv2i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 2 x i16> @llvm.riscv.vasubu.nxv2i16.i16(
+    <vscale x 2 x i16> %0,
+    i16 %1,
+    i64 %2)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vasubu.mask.nxv2i16.i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  i16,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i16> @intrinsic_vasubu_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv2i16_nxv2i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i16> @llvm.riscv.vasubu.mask.nxv2i16.i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %1,
+    i16 %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vasubu.nxv4i16.i16(
+  <vscale x 4 x i16>,
+  i16,
+  i64);
+
+define <vscale x 4 x i16> @intrinsic_vasubu_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_vx_nxv4i16_nxv4i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 4 x i16> @llvm.riscv.vasubu.nxv4i16.i16(
+    <vscale x 4 x i16> %0,
+    i16 %1,
+    i64 %2)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vasubu.mask.nxv4i16.i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  i16,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i16> @intrinsic_vasubu_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv4i16_nxv4i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i16> @llvm.riscv.vasubu.mask.nxv4i16.i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %1,
+    i16 %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vasubu.nxv8i16.i16(
+  <vscale x 8 x i16>,
+  i16,
+  i64);
+
+define <vscale x 8 x i16> @intrinsic_vasubu_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_vx_nxv8i16_nxv8i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 8 x i16> @llvm.riscv.vasubu.nxv8i16.i16(
+    <vscale x 8 x i16> %0,
+    i16 %1,
+    i64 %2)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vasubu.mask.nxv8i16.i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  i16,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i16> @intrinsic_vasubu_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv8i16_nxv8i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i16> @llvm.riscv.vasubu.mask.nxv8i16.i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %1,
+    i16 %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vasubu.nxv16i16.i16(
+  <vscale x 16 x i16>,
+  i16,
+  i64);
+
+define <vscale x 16 x i16> @intrinsic_vasubu_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_vx_nxv16i16_nxv16i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 16 x i16> @llvm.riscv.vasubu.nxv16i16.i16(
+    <vscale x 16 x i16> %0,
+    i16 %1,
+    i64 %2)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vasubu.mask.nxv16i16.i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  i16,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i16> @intrinsic_vasubu_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv16i16_nxv16i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i16> @llvm.riscv.vasubu.mask.nxv16i16.i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %1,
+    i16 %2,
+    <vscale x 16 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vasubu.nxv32i16.i16(
+  <vscale x 32 x i16>,
+  i16,
+  i64);
+
+define <vscale x 32 x i16> @intrinsic_vasubu_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_vx_nxv32i16_nxv32i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 32 x i16> @llvm.riscv.vasubu.nxv32i16.i16(
+    <vscale x 32 x i16> %0,
+    i16 %1,
+    i64 %2)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vasubu.mask.nxv32i16.i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  i16,
+  <vscale x 32 x i1>,
+  i64);
+
+define <vscale x 32 x i16> @intrinsic_vasubu_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv32i16_nxv32i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 32 x i16> @llvm.riscv.vasubu.mask.nxv32i16.i16(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16> %1,
+    i16 %2,
+    <vscale x 32 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vasubu.nxv1i32.i32(
+  <vscale x 1 x i32>,
+  i32,
+  i64);
+
+define <vscale x 1 x i32> @intrinsic_vasubu_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_vx_nxv1i32_nxv1i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 1 x i32> @llvm.riscv.vasubu.nxv1i32.i32(
+    <vscale x 1 x i32> %0,
+    i32 %1,
+    i64 %2)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vasubu.mask.nxv1i32.i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  i32,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i32> @intrinsic_vasubu_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv1i32_nxv1i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i32> @llvm.riscv.vasubu.mask.nxv1i32.i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    i32 %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vasubu.nxv2i32.i32(
+  <vscale x 2 x i32>,
+  i32,
+  i64);
+
+define <vscale x 2 x i32> @intrinsic_vasubu_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_vx_nxv2i32_nxv2i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 2 x i32> @llvm.riscv.vasubu.nxv2i32.i32(
+    <vscale x 2 x i32> %0,
+    i32 %1,
+    i64 %2)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vasubu.mask.nxv2i32.i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  i32,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i32> @intrinsic_vasubu_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv2i32_nxv2i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i32> @llvm.riscv.vasubu.mask.nxv2i32.i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    i32 %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vasubu.nxv4i32.i32(
+  <vscale x 4 x i32>,
+  i32,
+  i64);
+
+define <vscale x 4 x i32> @intrinsic_vasubu_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_vx_nxv4i32_nxv4i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 4 x i32> @llvm.riscv.vasubu.nxv4i32.i32(
+    <vscale x 4 x i32> %0,
+    i32 %1,
+    i64 %2)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vasubu.mask.nxv4i32.i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  i32,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i32> @intrinsic_vasubu_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv4i32_nxv4i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i32> @llvm.riscv.vasubu.mask.nxv4i32.i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    i32 %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vasubu.nxv8i32.i32(
+  <vscale x 8 x i32>,
+  i32,
+  i64);
+
+define <vscale x 8 x i32> @intrinsic_vasubu_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_vx_nxv8i32_nxv8i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 8 x i32> @llvm.riscv.vasubu.nxv8i32.i32(
+    <vscale x 8 x i32> %0,
+    i32 %1,
+    i64 %2)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vasubu.mask.nxv8i32.i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  i32,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i32> @intrinsic_vasubu_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv8i32_nxv8i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i32> @llvm.riscv.vasubu.mask.nxv8i32.i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    i32 %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vasubu.nxv16i32.i32(
+  <vscale x 16 x i32>,
+  i32,
+  i64);
+
+define <vscale x 16 x i32> @intrinsic_vasubu_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_vx_nxv16i32_nxv16i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 16 x i32> @llvm.riscv.vasubu.nxv16i32.i32(
+    <vscale x 16 x i32> %0,
+    i32 %1,
+    i64 %2)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vasubu.mask.nxv16i32.i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  i32,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i32> @intrinsic_vasubu_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv16i32_nxv16i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i32> @llvm.riscv.vasubu.mask.nxv16i32.i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    i32 %2,
+    <vscale x 16 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vasubu.nxv1i64.i64(
+  <vscale x 1 x i64>,
+  i64,
+  i64);
+
+define <vscale x 1 x i64> @intrinsic_vasubu_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_vx_nxv1i64_nxv1i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 1 x i64> @llvm.riscv.vasubu.nxv1i64.i64(
+    <vscale x 1 x i64> %0,
+    i64 %1,
+    i64 %2)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vasubu.mask.nxv1i64.i64(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>,
+  i64,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i64> @intrinsic_vasubu_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv1i64_nxv1i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i64> @llvm.riscv.vasubu.mask.nxv1i64.i64(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64> %1,
+    i64 %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vasubu.nxv2i64.i64(
+  <vscale x 2 x i64>,
+  i64,
+  i64);
+
+define <vscale x 2 x i64> @intrinsic_vasubu_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_vx_nxv2i64_nxv2i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 2 x i64> @llvm.riscv.vasubu.nxv2i64.i64(
+    <vscale x 2 x i64> %0,
+    i64 %1,
+    i64 %2)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vasubu.mask.nxv2i64.i64(
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>,
+  i64,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i64> @intrinsic_vasubu_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv2i64_nxv2i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i64> @llvm.riscv.vasubu.mask.nxv2i64.i64(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64> %1,
+    i64 %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vasubu.nxv4i64.i64(
+  <vscale x 4 x i64>,
+  i64,
+  i64);
+
+define <vscale x 4 x i64> @intrinsic_vasubu_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_vx_nxv4i64_nxv4i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 4 x i64> @llvm.riscv.vasubu.nxv4i64.i64(
+    <vscale x 4 x i64> %0,
+    i64 %1,
+    i64 %2)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vasubu.mask.nxv4i64.i64(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  i64,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i64> @intrinsic_vasubu_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv4i64_nxv4i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i64> @llvm.riscv.vasubu.mask.nxv4i64.i64(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64> %1,
+    i64 %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vasubu.nxv8i64.i64(
+  <vscale x 8 x i64>,
+  i64,
+  i64);
+
+define <vscale x 8 x i64> @intrinsic_vasubu_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_vx_nxv8i64_nxv8i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 8 x i64> @llvm.riscv.vasubu.nxv8i64.i64(
+    <vscale x 8 x i64> %0,
+    i64 %1,
+    i64 %2)
+
+  ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vasubu.mask.nxv8i64.i64(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>,
+  i64,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i64> @intrinsic_vasubu_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv8i64_nxv8i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vasubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i64> @llvm.riscv.vasubu.mask.nxv8i64.i64(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i64> %1,
+    i64 %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i64> %a
+}

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vnclip-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vnclip-rv32.ll
new file mode 100644
index 000000000000..e366aed945b9
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vnclip-rv32.ll
@@ -0,0 +1,1189 @@
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i8>,
+  i32);
+
+define <vscale x 1 x i8> @intrinsic_vnclip_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_wv_nxv1i8_nxv1i16_nxv1i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i16>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i8> @intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i16> %1,
+    <vscale x 1 x i8> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16.nxv2i8(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i8>,
+  i32);
+
+define <vscale x 2 x i8> @intrinsic_vnclip_wv_nxv2i8_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_wv_nxv2i8_nxv2i16_nxv2i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16.nxv2i8(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i16>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i8> @intrinsic_vnclip_mask_wv_nxv2i8_nxv2i16_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv2i8_nxv2i16_nxv2i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i16> %1,
+    <vscale x 2 x i8> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16.nxv4i8(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i8>,
+  i32);
+
+define <vscale x 4 x i8> @intrinsic_vnclip_wv_nxv4i8_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_wv_nxv4i8_nxv4i16_nxv4i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16.nxv4i8(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i16>,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i8> @intrinsic_vnclip_mask_wv_nxv4i8_nxv4i16_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv4i8_nxv4i16_nxv4i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i16> %1,
+    <vscale x 4 x i8> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16.nxv8i8(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i8>,
+  i32);
+
+define <vscale x 8 x i8> @intrinsic_vnclip_wv_nxv8i8_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_wv_nxv8i8_nxv8i16_nxv8i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16.nxv8i8(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i16>,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i8> @intrinsic_vnclip_mask_wv_nxv8i8_nxv8i16_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv8i8_nxv8i16_nxv8i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i16> %1,
+    <vscale x 8 x i8> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16.nxv16i8(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i8>,
+  i32);
+
+define <vscale x 16 x i8> @intrinsic_vnclip_wv_nxv16i8_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_wv_nxv16i8_nxv16i16_nxv16i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16.nxv16i8(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.nxv16i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i16>,
+  <vscale x 16 x i8>,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i8> @intrinsic_vnclip_mask_wv_nxv16i8_nxv16i16_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv16i8_nxv16i16_nxv16i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.nxv16i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i16> %1,
+    <vscale x 16 x i8> %2,
+    <vscale x 16 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16.nxv32i8(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i8>,
+  i32);
+
+define <vscale x 32 x i8> @intrinsic_vnclip_wv_nxv32i8_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_wv_nxv32i8_nxv32i16_nxv32i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16.nxv32i8(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.nxv32i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i16>,
+  <vscale x 32 x i8>,
+  <vscale x 32 x i1>,
+  i32);
+
+define <vscale x 32 x i8> @intrinsic_vnclip_mask_wv_nxv32i8_nxv32i16_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv32i8_nxv32i16_nxv32i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.nxv32i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i16> %1,
+    <vscale x 32 x i8> %2,
+    <vscale x 32 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32.nxv1i16(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i16>,
+  i32);
+
+define <vscale x 1 x i16> @intrinsic_vnclip_wv_nxv1i16_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_wv_nxv1i16_nxv1i32_nxv1i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32.nxv1i16(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i16> %1,
+    i32 %2)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i32>,
+  <vscale x 1 x i16>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i16> @intrinsic_vnclip_mask_wv_nxv1i16_nxv1i32_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv1i16_nxv1i32_nxv1i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.nxv1i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i32> %1,
+    <vscale x 1 x i16> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32.nxv2i16(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i16>,
+  i32);
+
+define <vscale x 2 x i16> @intrinsic_vnclip_wv_nxv2i16_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_wv_nxv2i16_nxv2i32_nxv2i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32.nxv2i16(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i16> %1,
+    i32 %2)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.nxv2i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i32>,
+  <vscale x 2 x i16>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i16> @intrinsic_vnclip_mask_wv_nxv2i16_nxv2i32_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv2i16_nxv2i32_nxv2i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.nxv2i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i32> %1,
+    <vscale x 2 x i16> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32.nxv4i16(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i16>,
+  i32);
+
+define <vscale x 4 x i16> @intrinsic_vnclip_wv_nxv4i16_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_wv_nxv4i16_nxv4i32_nxv4i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32.nxv4i16(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i16> %1,
+    i32 %2)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.nxv4i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i32>,
+  <vscale x 4 x i16>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i16> @intrinsic_vnclip_mask_wv_nxv4i16_nxv4i32_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv4i16_nxv4i32_nxv4i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.nxv4i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i32> %1,
+    <vscale x 4 x i16> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32.nxv8i16(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i16>,
+  i32);
+
+define <vscale x 8 x i16> @intrinsic_vnclip_wv_nxv8i16_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_wv_nxv8i16_nxv8i32_nxv8i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32.nxv8i16(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i16> %1,
+    i32 %2)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.nxv8i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i32>,
+  <vscale x 8 x i16>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i16> @intrinsic_vnclip_mask_wv_nxv8i16_nxv8i32_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv8i16_nxv8i32_nxv8i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.nxv8i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i32> %1,
+    <vscale x 8 x i16> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32.nxv16i16(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i16>,
+  i32);
+
+define <vscale x 16 x i16> @intrinsic_vnclip_wv_nxv16i16_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_wv_nxv16i16_nxv16i32_nxv16i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32.nxv16i16(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i16> %1,
+    i32 %2)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.nxv16i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i32>,
+  <vscale x 16 x i16>,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i16> @intrinsic_vnclip_mask_wv_nxv16i16_nxv16i32_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv16i16_nxv16i32_nxv16i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.nxv16i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i32> %1,
+    <vscale x 16 x i16> %2,
+    <vscale x 16 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.i8(
+  <vscale x 1 x i16>,
+  i8,
+  i32);
+
+define <vscale x 1 x i8> @intrinsic_vnclip_wx_nxv1i8_nxv1i16_i8(<vscale x 1 x i16> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_wx_nxv1i8_nxv1i16_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.i8(
+    <vscale x 1 x i16> %0,
+    i8 %1,
+    i32 %2)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i16>,
+  i8,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i8> @intrinsic_vnclip_mask_wx_nxv1i8_nxv1i16_i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv1i8_nxv1i16_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i16> %1,
+    i8 %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16.i8(
+  <vscale x 2 x i16>,
+  i8,
+  i32);
+
+define <vscale x 2 x i8> @intrinsic_vnclip_wx_nxv2i8_nxv2i16_i8(<vscale x 2 x i16> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_wx_nxv2i8_nxv2i16_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16.i8(
+    <vscale x 2 x i16> %0,
+    i8 %1,
+    i32 %2)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i16>,
+  i8,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i8> @intrinsic_vnclip_mask_wx_nxv2i8_nxv2i16_i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv2i8_nxv2i16_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i16> %1,
+    i8 %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16.i8(
+  <vscale x 4 x i16>,
+  i8,
+  i32);
+
+define <vscale x 4 x i8> @intrinsic_vnclip_wx_nxv4i8_nxv4i16_i8(<vscale x 4 x i16> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_wx_nxv4i8_nxv4i16_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16.i8(
+    <vscale x 4 x i16> %0,
+    i8 %1,
+    i32 %2)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i16>,
+  i8,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i8> @intrinsic_vnclip_mask_wx_nxv4i8_nxv4i16_i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv4i8_nxv4i16_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i16> %1,
+    i8 %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16.i8(
+  <vscale x 8 x i16>,
+  i8,
+  i32);
+
+define <vscale x 8 x i8> @intrinsic_vnclip_wx_nxv8i8_nxv8i16_i8(<vscale x 8 x i16> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_wx_nxv8i8_nxv8i16_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16.i8(
+    <vscale x 8 x i16> %0,
+    i8 %1,
+    i32 %2)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i16>,
+  i8,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i8> @intrinsic_vnclip_mask_wx_nxv8i8_nxv8i16_i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv8i8_nxv8i16_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i16> %1,
+    i8 %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16.i8(
+  <vscale x 16 x i16>,
+  i8,
+  i32);
+
+define <vscale x 16 x i8> @intrinsic_vnclip_wx_nxv16i8_nxv16i16_i8(<vscale x 16 x i16> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_wx_nxv16i8_nxv16i16_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16.i8(
+    <vscale x 16 x i16> %0,
+    i8 %1,
+    i32 %2)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i16>,
+  i8,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i8> @intrinsic_vnclip_mask_wx_nxv16i8_nxv16i16_i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv16i8_nxv16i16_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i16> %1,
+    i8 %2,
+    <vscale x 16 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16.i8(
+  <vscale x 32 x i16>,
+  i8,
+  i32);
+
+define <vscale x 32 x i8> @intrinsic_vnclip_wx_nxv32i8_nxv32i16_i8(<vscale x 32 x i16> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_wx_nxv32i8_nxv32i16_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16.i8(
+    <vscale x 32 x i16> %0,
+    i8 %1,
+    i32 %2)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i16>,
+  i8,
+  <vscale x 32 x i1>,
+  i32);
+
+define <vscale x 32 x i8> @intrinsic_vnclip_mask_wx_nxv32i8_nxv32i16_i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv32i8_nxv32i16_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i16> %1,
+    i8 %2,
+    <vscale x 32 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32.i16(
+  <vscale x 1 x i32>,
+  i16,
+  i32);
+
+define <vscale x 1 x i16> @intrinsic_vnclip_wx_nxv1i16_nxv1i32_i16(<vscale x 1 x i32> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_wx_nxv1i16_nxv1i32_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32.i16(
+    <vscale x 1 x i32> %0,
+    i16 %1,
+    i32 %2)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i32>,
+  i16,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i16> @intrinsic_vnclip_mask_wx_nxv1i16_nxv1i32_i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv1i16_nxv1i32_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i32> %1,
+    i16 %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32.i16(
+  <vscale x 2 x i32>,
+  i16,
+  i32);
+
+define <vscale x 2 x i16> @intrinsic_vnclip_wx_nxv2i16_nxv2i32_i16(<vscale x 2 x i32> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_wx_nxv2i16_nxv2i32_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32.i16(
+    <vscale x 2 x i32> %0,
+    i16 %1,
+    i32 %2)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i32>,
+  i16,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i16> @intrinsic_vnclip_mask_wx_nxv2i16_nxv2i32_i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv2i16_nxv2i32_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i32> %1,
+    i16 %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32.i16(
+  <vscale x 4 x i32>,
+  i16,
+  i32);
+
+define <vscale x 4 x i16> @intrinsic_vnclip_wx_nxv4i16_nxv4i32_i16(<vscale x 4 x i32> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_wx_nxv4i16_nxv4i32_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32.i16(
+    <vscale x 4 x i32> %0,
+    i16 %1,
+    i32 %2)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i32>,
+  i16,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i16> @intrinsic_vnclip_mask_wx_nxv4i16_nxv4i32_i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv4i16_nxv4i32_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i32> %1,
+    i16 %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32.i16(
+  <vscale x 8 x i32>,
+  i16,
+  i32);
+
+define <vscale x 8 x i16> @intrinsic_vnclip_wx_nxv8i16_nxv8i32_i16(<vscale x 8 x i32> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_wx_nxv8i16_nxv8i32_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32.i16(
+    <vscale x 8 x i32> %0,
+    i16 %1,
+    i32 %2)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i32>,
+  i16,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i16> @intrinsic_vnclip_mask_wx_nxv8i16_nxv8i32_i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv8i16_nxv8i32_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i32> %1,
+    i16 %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32.i16(
+  <vscale x 16 x i32>,
+  i16,
+  i32);
+
+define <vscale x 16 x i16> @intrinsic_vnclip_wx_nxv16i16_nxv16i32_i16(<vscale x 16 x i32> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_wx_nxv16i16_nxv16i32_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32.i16(
+    <vscale x 16 x i32> %0,
+    i16 %1,
+    i32 %2)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i32>,
+  i16,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i16> @intrinsic_vnclip_mask_wx_nxv16i16_nxv16i32_i16(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv16i16_nxv16i32_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i32> %1,
+    i16 %2,
+    <vscale x 16 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 1 x i8> @intrinsic_vnclip_wi_nxv1i8_nxv1i16_i8(<vscale x 1 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_wi_nxv1i8_nxv1i16_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.i8(
+    <vscale x 1 x i16> %0,
+    i8 9,
+    i32 %1)
+
+  ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 1 x i8> @intrinsic_vnclip_mask_wi_nxv1i8_nxv1i16_i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv1i8_nxv1i16_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i16> %1,
+    i8 9,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_vnclip_wi_nxv2i8_nxv2i16_i8(<vscale x 2 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_wi_nxv2i8_nxv2i16_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16.i8(
+    <vscale x 2 x i16> %0,
+    i8 9,
+    i32 %1)
+
+  ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_vnclip_mask_wi_nxv2i8_nxv2i16_i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv2i8_nxv2i16_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i16> %1,
+    i8 9,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_vnclip_wi_nxv4i8_nxv4i16_i8(<vscale x 4 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_wi_nxv4i8_nxv4i16_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16.i8(
+    <vscale x 4 x i16> %0,
+    i8 9,
+    i32 %1)
+
+  ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_vnclip_mask_wi_nxv4i8_nxv4i16_i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv4i8_nxv4i16_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i16> %1,
+    i8 9,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vnclip_wi_nxv8i8_nxv8i16_i8(<vscale x 8 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_wi_nxv8i8_nxv8i16_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16.i8(
+    <vscale x 8 x i16> %0,
+    i8 9,
+    i32 %1)
+
+  ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vnclip_mask_wi_nxv8i8_nxv8i16_i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv8i8_nxv8i16_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i16> %1,
+    i8 9,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_vnclip_wi_nxv16i8_nxv16i16_i8(<vscale x 16 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_wi_nxv16i8_nxv16i16_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16.i8(
+    <vscale x 16 x i16> %0,
+    i8 9,
+    i32 %1)
+
+  ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_vnclip_mask_wi_nxv16i8_nxv16i16_i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv16i8_nxv16i16_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i16> %1,
+    i8 9,
+    <vscale x 16 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 32 x i8> @intrinsic_vnclip_wi_nxv32i8_nxv32i16_i8(<vscale x 32 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_wi_nxv32i8_nxv32i16_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16.i8(
+    <vscale x 32 x i16> %0,
+    i8 9,
+    i32 %1)
+
+  ret <vscale x 32 x i8> %a
+}
+
+define <vscale x 32 x i8> @intrinsic_vnclip_mask_wi_nxv32i8_nxv32i16_i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv32i8_nxv32i16_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i16> %1,
+    i8 9,
+    <vscale x 32 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 32 x i8> %a
+}
+
+define <vscale x 1 x i16> @intrinsic_vnclip_wi_nxv1i16_nxv1i32_i16(<vscale x 1 x i32> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_wi_nxv1i16_nxv1i32_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32.i16(
+    <vscale x 1 x i32> %0,
+    i16 9,
+    i32 %1)
+
+  ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 1 x i16> @intrinsic_vnclip_mask_wi_nxv1i16_nxv1i32_i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv1i16_nxv1i32_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i32> %1,
+    i16 9,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 2 x i16> @intrinsic_vnclip_wi_nxv2i16_nxv2i32_i16(<vscale x 2 x i32> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_wi_nxv2i16_nxv2i32_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32.i16(
+    <vscale x 2 x i32> %0,
+    i16 9,
+    i32 %1)
+
+  ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 2 x i16> @intrinsic_vnclip_mask_wi_nxv2i16_nxv2i32_i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv2i16_nxv2i32_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i32> %1,
+    i16 9,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 4 x i16> @intrinsic_vnclip_wi_nxv4i16_nxv4i32_i16(<vscale x 4 x i32> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_wi_nxv4i16_nxv4i32_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32.i16(
+    <vscale x 4 x i32> %0,
+    i16 9,
+    i32 %1)
+
+  ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 4 x i16> @intrinsic_vnclip_mask_wi_nxv4i16_nxv4i32_i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv4i16_nxv4i32_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i32> %1,
+    i16 9,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 8 x i16> @intrinsic_vnclip_wi_nxv8i16_nxv8i32_i16(<vscale x 8 x i32> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_wi_nxv8i16_nxv8i32_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32.i16(
+    <vscale x 8 x i32> %0,
+    i16 9,
+    i32 %1)
+
+  ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 8 x i16> @intrinsic_vnclip_mask_wi_nxv8i16_nxv8i32_i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv8i16_nxv8i32_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i32> %1,
+    i16 9,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 16 x i16> @intrinsic_vnclip_wi_nxv16i16_nxv16i32_i16(<vscale x 16 x i32> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_wi_nxv16i16_nxv16i32_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32.i16(
+    <vscale x 16 x i32> %0,
+    i16 9,
+    i32 %1)
+
+  ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 16 x i16> @intrinsic_vnclip_mask_wi_nxv16i16_nxv16i32_i16(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv16i16_nxv16i32_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i32> %1,
+    i16 9,
+    <vscale x 16 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 16 x i16> %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vnclip-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vnclip-rv64.ll
new file mode 100644
index 000000000000..e7d45d2d9353
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vnclip-rv64.ll
@@ -0,0 +1,1621 @@
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i8>,
+  i64);
+
+define <vscale x 1 x i8> @intrinsic_vnclip_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_wv_nxv1i8_nxv1i16_nxv1i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i16>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i8> @intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i16> %1,
+    <vscale x 1 x i8> %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16.nxv2i8(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i8>,
+  i64);
+
+define <vscale x 2 x i8> @intrinsic_vnclip_wv_nxv2i8_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_wv_nxv2i8_nxv2i16_nxv2i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16.nxv2i8(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i16>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i8> @intrinsic_vnclip_mask_wv_nxv2i8_nxv2i16_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv2i8_nxv2i16_nxv2i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i16> %1,
+    <vscale x 2 x i8> %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16.nxv4i8(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i8>,
+  i64);
+
+define <vscale x 4 x i8> @intrinsic_vnclip_wv_nxv4i8_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_wv_nxv4i8_nxv4i16_nxv4i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16.nxv4i8(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i16>,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i8> @intrinsic_vnclip_mask_wv_nxv4i8_nxv4i16_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv4i8_nxv4i16_nxv4i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i16> %1,
+    <vscale x 4 x i8> %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16.nxv8i8(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i8>,
+  i64);
+
+define <vscale x 8 x i8> @intrinsic_vnclip_wv_nxv8i8_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_wv_nxv8i8_nxv8i16_nxv8i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16.nxv8i8(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i16>,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i8> @intrinsic_vnclip_mask_wv_nxv8i8_nxv8i16_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv8i8_nxv8i16_nxv8i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i16> %1,
+    <vscale x 8 x i8> %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16.nxv16i8(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i8>,
+  i64);
+
+define <vscale x 16 x i8> @intrinsic_vnclip_wv_nxv16i8_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_wv_nxv16i8_nxv16i16_nxv16i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16.nxv16i8(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.nxv16i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i16>,
+  <vscale x 16 x i8>,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i8> @intrinsic_vnclip_mask_wv_nxv16i8_nxv16i16_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv16i8_nxv16i16_nxv16i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.nxv16i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i16> %1,
+    <vscale x 16 x i8> %2,
+    <vscale x 16 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16.nxv32i8(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i8>,
+  i64);
+
+define <vscale x 32 x i8> @intrinsic_vnclip_wv_nxv32i8_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_wv_nxv32i8_nxv32i16_nxv32i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16.nxv32i8(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.nxv32i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i16>,
+  <vscale x 32 x i8>,
+  <vscale x 32 x i1>,
+  i64);
+
+define <vscale x 32 x i8> @intrinsic_vnclip_mask_wv_nxv32i8_nxv32i16_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv32i8_nxv32i16_nxv32i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.nxv32i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i16> %1,
+    <vscale x 32 x i8> %2,
+    <vscale x 32 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32.nxv1i16(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i16>,
+  i64);
+
+define <vscale x 1 x i16> @intrinsic_vnclip_wv_nxv1i16_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_wv_nxv1i16_nxv1i32_nxv1i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32.nxv1i16(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i16> %1,
+    i64 %2)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i32>,
+  <vscale x 1 x i16>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i16> @intrinsic_vnclip_mask_wv_nxv1i16_nxv1i32_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv1i16_nxv1i32_nxv1i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.nxv1i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i32> %1,
+    <vscale x 1 x i16> %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32.nxv2i16(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i16>,
+  i64);
+
+define <vscale x 2 x i16> @intrinsic_vnclip_wv_nxv2i16_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_wv_nxv2i16_nxv2i32_nxv2i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32.nxv2i16(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i16> %1,
+    i64 %2)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.nxv2i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i32>,
+  <vscale x 2 x i16>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i16> @intrinsic_vnclip_mask_wv_nxv2i16_nxv2i32_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv2i16_nxv2i32_nxv2i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.nxv2i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i32> %1,
+    <vscale x 2 x i16> %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32.nxv4i16(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i16>,
+  i64);
+
+define <vscale x 4 x i16> @intrinsic_vnclip_wv_nxv4i16_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_wv_nxv4i16_nxv4i32_nxv4i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32.nxv4i16(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i16> %1,
+    i64 %2)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.nxv4i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i32>,
+  <vscale x 4 x i16>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i16> @intrinsic_vnclip_mask_wv_nxv4i16_nxv4i32_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv4i16_nxv4i32_nxv4i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.nxv4i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i32> %1,
+    <vscale x 4 x i16> %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32.nxv8i16(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i16>,
+  i64);
+
+define <vscale x 8 x i16> @intrinsic_vnclip_wv_nxv8i16_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_wv_nxv8i16_nxv8i32_nxv8i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32.nxv8i16(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i16> %1,
+    i64 %2)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.nxv8i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i32>,
+  <vscale x 8 x i16>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i16> @intrinsic_vnclip_mask_wv_nxv8i16_nxv8i32_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv8i16_nxv8i32_nxv8i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.nxv8i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i32> %1,
+    <vscale x 8 x i16> %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32.nxv16i16(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i16>,
+  i64);
+
+define <vscale x 16 x i16> @intrinsic_vnclip_wv_nxv16i16_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_wv_nxv16i16_nxv16i32_nxv16i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32.nxv16i16(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i16> %1,
+    i64 %2)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.nxv16i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i32>,
+  <vscale x 16 x i16>,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i16> @intrinsic_vnclip_mask_wv_nxv16i16_nxv16i32_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv16i16_nxv16i32_nxv16i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.nxv16i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i32> %1,
+    <vscale x 16 x i16> %2,
+    <vscale x 16 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64.nxv1i32(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i32>,
+  i64);
+
+define <vscale x 1 x i32> @intrinsic_vnclip_wv_nxv1i32_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_wv_nxv1i32_nxv1i64_nxv1i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64.nxv1i32(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i32> %1,
+    i64 %2)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i64>,
+  <vscale x 1 x i32>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i32> @intrinsic_vnclip_mask_wv_nxv1i32_nxv1i64_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv1i32_nxv1i64_nxv1i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i32> @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.nxv1i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i64> %1,
+    <vscale x 1 x i32> %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vnclip.nxv2i32.nxv2i64.nxv2i32(
+  <vscale x 2 x i64>,
+  <vscale x 2 x i32>,
+  i64);
+
+define <vscale x 2 x i32> @intrinsic_vnclip_wv_nxv2i32_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_wv_nxv2i32_nxv2i64_nxv2i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i32> @llvm.riscv.vnclip.nxv2i32.nxv2i64.nxv2i32(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i32> %1,
+    i64 %2)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.nxv2i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i64>,
+  <vscale x 2 x i32>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i32> @intrinsic_vnclip_mask_wv_nxv2i32_nxv2i64_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv2i32_nxv2i64_nxv2i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i32> @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.nxv2i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i64> %1,
+    <vscale x 2 x i32> %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vnclip.nxv4i32.nxv4i64.nxv4i32(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i32>,
+  i64);
+
+define <vscale x 4 x i32> @intrinsic_vnclip_wv_nxv4i32_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_wv_nxv4i32_nxv4i64_nxv4i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i32> @llvm.riscv.vnclip.nxv4i32.nxv4i64.nxv4i32(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i32> %1,
+    i64 %2)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i64>,
+  <vscale x 4 x i32>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i32> @intrinsic_vnclip_mask_wv_nxv4i32_nxv4i64_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv4i32_nxv4i64_nxv4i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i32> @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.nxv4i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i64> %1,
+    <vscale x 4 x i32> %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vnclip.nxv8i32.nxv8i64.nxv8i32(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i32>,
+  i64);
+
+define <vscale x 8 x i32> @intrinsic_vnclip_wv_nxv8i32_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_wv_nxv8i32_nxv8i64_nxv8i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i32> @llvm.riscv.vnclip.nxv8i32.nxv8i64.nxv8i32(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i32> %1,
+    i64 %2)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i64>,
+  <vscale x 8 x i32>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i32> @intrinsic_vnclip_mask_wv_nxv8i32_nxv8i64_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv8i32_nxv8i64_nxv8i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vnclip.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i32> @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.nxv8i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i64> %1,
+    <vscale x 8 x i32> %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.i8(
+  <vscale x 1 x i16>,
+  i8,
+  i64);
+
+define <vscale x 1 x i8> @intrinsic_vnclip_wx_nxv1i8_nxv1i16_i8(<vscale x 1 x i16> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_wx_nxv1i8_nxv1i16_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.i8(
+    <vscale x 1 x i16> %0,
+    i8 %1,
+    i64 %2)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i16>,
+  i8,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i8> @intrinsic_vnclip_mask_wx_nxv1i8_nxv1i16_i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv1i8_nxv1i16_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i16> %1,
+    i8 %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16.i8(
+  <vscale x 2 x i16>,
+  i8,
+  i64);
+
+define <vscale x 2 x i8> @intrinsic_vnclip_wx_nxv2i8_nxv2i16_i8(<vscale x 2 x i16> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_wx_nxv2i8_nxv2i16_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16.i8(
+    <vscale x 2 x i16> %0,
+    i8 %1,
+    i64 %2)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i16>,
+  i8,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i8> @intrinsic_vnclip_mask_wx_nxv2i8_nxv2i16_i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv2i8_nxv2i16_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i16> %1,
+    i8 %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16.i8(
+  <vscale x 4 x i16>,
+  i8,
+  i64);
+
+define <vscale x 4 x i8> @intrinsic_vnclip_wx_nxv4i8_nxv4i16_i8(<vscale x 4 x i16> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_wx_nxv4i8_nxv4i16_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16.i8(
+    <vscale x 4 x i16> %0,
+    i8 %1,
+    i64 %2)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i16>,
+  i8,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i8> @intrinsic_vnclip_mask_wx_nxv4i8_nxv4i16_i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv4i8_nxv4i16_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i16> %1,
+    i8 %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16.i8(
+  <vscale x 8 x i16>,
+  i8,
+  i64);
+
+define <vscale x 8 x i8> @intrinsic_vnclip_wx_nxv8i8_nxv8i16_i8(<vscale x 8 x i16> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_wx_nxv8i8_nxv8i16_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16.i8(
+    <vscale x 8 x i16> %0,
+    i8 %1,
+    i64 %2)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i16>,
+  i8,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i8> @intrinsic_vnclip_mask_wx_nxv8i8_nxv8i16_i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv8i8_nxv8i16_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i16> %1,
+    i8 %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16.i8(
+  <vscale x 16 x i16>,
+  i8,
+  i64);
+
+define <vscale x 16 x i8> @intrinsic_vnclip_wx_nxv16i8_nxv16i16_i8(<vscale x 16 x i16> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_wx_nxv16i8_nxv16i16_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16.i8(
+    <vscale x 16 x i16> %0,
+    i8 %1,
+    i64 %2)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i16>,
+  i8,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i8> @intrinsic_vnclip_mask_wx_nxv16i8_nxv16i16_i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv16i8_nxv16i16_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i16> %1,
+    i8 %2,
+    <vscale x 16 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16.i8(
+  <vscale x 32 x i16>,
+  i8,
+  i64);
+
+define <vscale x 32 x i8> @intrinsic_vnclip_wx_nxv32i8_nxv32i16_i8(<vscale x 32 x i16> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_wx_nxv32i8_nxv32i16_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16.i8(
+    <vscale x 32 x i16> %0,
+    i8 %1,
+    i64 %2)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i16>,
+  i8,
+  <vscale x 32 x i1>,
+  i64);
+
+define <vscale x 32 x i8> @intrinsic_vnclip_mask_wx_nxv32i8_nxv32i16_i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv32i8_nxv32i16_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i16> %1,
+    i8 %2,
+    <vscale x 32 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32.i16(
+  <vscale x 1 x i32>,
+  i16,
+  i64);
+
+define <vscale x 1 x i16> @intrinsic_vnclip_wx_nxv1i16_nxv1i32_i16(<vscale x 1 x i32> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_wx_nxv1i16_nxv1i32_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32.i16(
+    <vscale x 1 x i32> %0,
+    i16 %1,
+    i64 %2)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i32>,
+  i16,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i16> @intrinsic_vnclip_mask_wx_nxv1i16_nxv1i32_i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv1i16_nxv1i32_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i32> %1,
+    i16 %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32.i16(
+  <vscale x 2 x i32>,
+  i16,
+  i64);
+
+define <vscale x 2 x i16> @intrinsic_vnclip_wx_nxv2i16_nxv2i32_i16(<vscale x 2 x i32> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_wx_nxv2i16_nxv2i32_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32.i16(
+    <vscale x 2 x i32> %0,
+    i16 %1,
+    i64 %2)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i32>,
+  i16,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i16> @intrinsic_vnclip_mask_wx_nxv2i16_nxv2i32_i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv2i16_nxv2i32_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i32> %1,
+    i16 %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32.i16(
+  <vscale x 4 x i32>,
+  i16,
+  i64);
+
+define <vscale x 4 x i16> @intrinsic_vnclip_wx_nxv4i16_nxv4i32_i16(<vscale x 4 x i32> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_wx_nxv4i16_nxv4i32_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32.i16(
+    <vscale x 4 x i32> %0,
+    i16 %1,
+    i64 %2)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i32>,
+  i16,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i16> @intrinsic_vnclip_mask_wx_nxv4i16_nxv4i32_i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv4i16_nxv4i32_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i32> %1,
+    i16 %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32.i16(
+  <vscale x 8 x i32>,
+  i16,
+  i64);
+
+define <vscale x 8 x i16> @intrinsic_vnclip_wx_nxv8i16_nxv8i32_i16(<vscale x 8 x i32> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_wx_nxv8i16_nxv8i32_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32.i16(
+    <vscale x 8 x i32> %0,
+    i16 %1,
+    i64 %2)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i32>,
+  i16,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i16> @intrinsic_vnclip_mask_wx_nxv8i16_nxv8i32_i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv8i16_nxv8i32_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i32> %1,
+    i16 %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32.i16(
+  <vscale x 16 x i32>,
+  i16,
+  i64);
+
+define <vscale x 16 x i16> @intrinsic_vnclip_wx_nxv16i16_nxv16i32_i16(<vscale x 16 x i32> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_wx_nxv16i16_nxv16i32_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32.i16(
+    <vscale x 16 x i32> %0,
+    i16 %1,
+    i64 %2)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i32>,
+  i16,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i16> @intrinsic_vnclip_mask_wx_nxv16i16_nxv16i32_i16(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv16i16_nxv16i32_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i32> %1,
+    i16 %2,
+    <vscale x 16 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64.i32(
+  <vscale x 1 x i64>,
+  i32,
+  i64);
+
+define <vscale x 1 x i32> @intrinsic_vnclip_wx_nxv1i32_nxv1i64_i32(<vscale x 1 x i64> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_wx_nxv1i32_nxv1i64_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64.i32(
+    <vscale x 1 x i64> %0,
+    i32 %1,
+    i64 %2)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i64>,
+  i32,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i32> @intrinsic_vnclip_mask_wx_nxv1i32_nxv1i64_i32(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv1i32_nxv1i64_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i32> @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i64> %1,
+    i32 %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vnclip.nxv2i32.nxv2i64.i32(
+  <vscale x 2 x i64>,
+  i32,
+  i64);
+
+define <vscale x 2 x i32> @intrinsic_vnclip_wx_nxv2i32_nxv2i64_i32(<vscale x 2 x i64> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_wx_nxv2i32_nxv2i64_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 2 x i32> @llvm.riscv.vnclip.nxv2i32.nxv2i64.i32(
+    <vscale x 2 x i64> %0,
+    i32 %1,
+    i64 %2)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i64>,
+  i32,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i32> @intrinsic_vnclip_mask_wx_nxv2i32_nxv2i64_i32(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv2i32_nxv2i64_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i32> @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i64> %1,
+    i32 %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vnclip.nxv4i32.nxv4i64.i32(
+  <vscale x 4 x i64>,
+  i32,
+  i64);
+
+define <vscale x 4 x i32> @intrinsic_vnclip_wx_nxv4i32_nxv4i64_i32(<vscale x 4 x i64> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_wx_nxv4i32_nxv4i64_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 4 x i32> @llvm.riscv.vnclip.nxv4i32.nxv4i64.i32(
+    <vscale x 4 x i64> %0,
+    i32 %1,
+    i64 %2)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i64>,
+  i32,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i32> @intrinsic_vnclip_mask_wx_nxv4i32_nxv4i64_i32(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv4i32_nxv4i64_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i32> @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i64> %1,
+    i32 %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vnclip.nxv8i32.nxv8i64.i32(
+  <vscale x 8 x i64>,
+  i32,
+  i64);
+
+define <vscale x 8 x i32> @intrinsic_vnclip_wx_nxv8i32_nxv8i64_i32(<vscale x 8 x i64> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_wx_nxv8i32_nxv8i64_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 8 x i32> @llvm.riscv.vnclip.nxv8i32.nxv8i64.i32(
+    <vscale x 8 x i64> %0,
+    i32 %1,
+    i64 %2)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i64>,
+  i32,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i32> @intrinsic_vnclip_mask_wx_nxv8i32_nxv8i64_i32(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_mask_wx_nxv8i32_nxv8i64_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vnclip.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i32> @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i64> %1,
+    i32 %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 1 x i8> @intrinsic_vnclip_wi_nxv1i8_nxv1i16_i8(<vscale x 1 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_wi_nxv1i8_nxv1i16_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.i8(
+    <vscale x 1 x i16> %0,
+    i8 9,
+    i64 %1)
+
+  ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 1 x i8> @intrinsic_vnclip_mask_wi_nxv1i8_nxv1i16_i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv1i8_nxv1i16_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i16> %1,
+    i8 9,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_vnclip_wi_nxv2i8_nxv2i16_i8(<vscale x 2 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_wi_nxv2i8_nxv2i16_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16.i8(
+    <vscale x 2 x i16> %0,
+    i8 9,
+    i64 %1)
+
+  ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_vnclip_mask_wi_nxv2i8_nxv2i16_i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv2i8_nxv2i16_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i16> %1,
+    i8 9,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_vnclip_wi_nxv4i8_nxv4i16_i8(<vscale x 4 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_wi_nxv4i8_nxv4i16_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16.i8(
+    <vscale x 4 x i16> %0,
+    i8 9,
+    i64 %1)
+
+  ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_vnclip_mask_wi_nxv4i8_nxv4i16_i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv4i8_nxv4i16_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i16> %1,
+    i8 9,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vnclip_wi_nxv8i8_nxv8i16_i8(<vscale x 8 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_wi_nxv8i8_nxv8i16_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16.i8(
+    <vscale x 8 x i16> %0,
+    i8 9,
+    i64 %1)
+
+  ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vnclip_mask_wi_nxv8i8_nxv8i16_i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv8i8_nxv8i16_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i16> %1,
+    i8 9,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_vnclip_wi_nxv16i8_nxv16i16_i8(<vscale x 16 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_wi_nxv16i8_nxv16i16_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16.i8(
+    <vscale x 16 x i16> %0,
+    i8 9,
+    i64 %1)
+
+  ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_vnclip_mask_wi_nxv16i8_nxv16i16_i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv16i8_nxv16i16_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i16> %1,
+    i8 9,
+    <vscale x 16 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 32 x i8> @intrinsic_vnclip_wi_nxv32i8_nxv32i16_i8(<vscale x 32 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_wi_nxv32i8_nxv32i16_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16.i8(
+    <vscale x 32 x i16> %0,
+    i8 9,
+    i64 %1)
+
+  ret <vscale x 32 x i8> %a
+}
+
+define <vscale x 32 x i8> @intrinsic_vnclip_mask_wi_nxv32i8_nxv32i16_i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv32i8_nxv32i16_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i16> %1,
+    i8 9,
+    <vscale x 32 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 32 x i8> %a
+}
+
+define <vscale x 1 x i16> @intrinsic_vnclip_wi_nxv1i16_nxv1i32_i16(<vscale x 1 x i32> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_wi_nxv1i16_nxv1i32_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32.i16(
+    <vscale x 1 x i32> %0,
+    i16 9,
+    i64 %1)
+
+  ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 1 x i16> @intrinsic_vnclip_mask_wi_nxv1i16_nxv1i32_i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv1i16_nxv1i32_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i32> %1,
+    i16 9,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 2 x i16> @intrinsic_vnclip_wi_nxv2i16_nxv2i32_i16(<vscale x 2 x i32> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_wi_nxv2i16_nxv2i32_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32.i16(
+    <vscale x 2 x i32> %0,
+    i16 9,
+    i64 %1)
+
+  ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 2 x i16> @intrinsic_vnclip_mask_wi_nxv2i16_nxv2i32_i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv2i16_nxv2i32_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i32> %1,
+    i16 9,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 4 x i16> @intrinsic_vnclip_wi_nxv4i16_nxv4i32_i16(<vscale x 4 x i32> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_wi_nxv4i16_nxv4i32_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32.i16(
+    <vscale x 4 x i32> %0,
+    i16 9,
+    i64 %1)
+
+  ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 4 x i16> @intrinsic_vnclip_mask_wi_nxv4i16_nxv4i32_i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv4i16_nxv4i32_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i32> %1,
+    i16 9,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 8 x i16> @intrinsic_vnclip_wi_nxv8i16_nxv8i32_i16(<vscale x 8 x i32> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_wi_nxv8i16_nxv8i32_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32.i16(
+    <vscale x 8 x i32> %0,
+    i16 9,
+    i64 %1)
+
+  ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 8 x i16> @intrinsic_vnclip_mask_wi_nxv8i16_nxv8i32_i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv8i16_nxv8i32_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i32> %1,
+    i16 9,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 16 x i16> @intrinsic_vnclip_wi_nxv16i16_nxv16i32_i16(<vscale x 16 x i32> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_wi_nxv16i16_nxv16i32_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32.i16(
+    <vscale x 16 x i32> %0,
+    i16 9,
+    i64 %1)
+
+  ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 16 x i16> @intrinsic_vnclip_mask_wi_nxv16i16_nxv16i32_i16(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv16i16_nxv16i32_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i32> %1,
+    i16 9,
+    <vscale x 16 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 1 x i32> @intrinsic_vnclip_wi_nxv1i32_nxv1i64_i32(<vscale x 1 x i64> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_wi_nxv1i32_nxv1i64_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64.i32(
+    <vscale x 1 x i64> %0,
+    i32 9,
+    i64 %1)
+
+  ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 1 x i32> @intrinsic_vnclip_mask_wi_nxv1i32_nxv1i64_i32(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv1i32_nxv1i64_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 1 x i32> @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i64> %1,
+    i32 9,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 2 x i32> @intrinsic_vnclip_wi_nxv2i32_nxv2i64_i32(<vscale x 2 x i64> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_wi_nxv2i32_nxv2i64_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 2 x i32> @llvm.riscv.vnclip.nxv2i32.nxv2i64.i32(
+    <vscale x 2 x i64> %0,
+    i32 9,
+    i64 %1)
+
+  ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 2 x i32> @intrinsic_vnclip_mask_wi_nxv2i32_nxv2i64_i32(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv2i32_nxv2i64_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 2 x i32> @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i64> %1,
+    i32 9,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 4 x i32> @intrinsic_vnclip_wi_nxv4i32_nxv4i64_i32(<vscale x 4 x i64> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_wi_nxv4i32_nxv4i64_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 4 x i32> @llvm.riscv.vnclip.nxv4i32.nxv4i64.i32(
+    <vscale x 4 x i64> %0,
+    i32 9,
+    i64 %1)
+
+  ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 4 x i32> @intrinsic_vnclip_mask_wi_nxv4i32_nxv4i64_i32(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv4i32_nxv4i64_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 4 x i32> @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i64> %1,
+    i32 9,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 8 x i32> @intrinsic_vnclip_wi_nxv8i32_nxv8i64_i32(<vscale x 8 x i64> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_wi_nxv8i32_nxv8i64_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 8 x i32> @llvm.riscv.vnclip.nxv8i32.nxv8i64.i32(
+    <vscale x 8 x i64> %0,
+    i32 9,
+    i64 %1)
+
+  ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 8 x i32> @intrinsic_vnclip_mask_wi_nxv8i32_nxv8i64_i32(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclip_mask_wi_nxv8i32_nxv8i64_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vnclip.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 8 x i32> @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i64> %1,
+    i32 9,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i32> %a
+}

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv32.ll
new file mode 100644
index 000000000000..d7e957f86e98
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv32.ll
@@ -0,0 +1,1189 @@
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i8>,
+  i32);
+
+define <vscale x 1 x i8> @intrinsic_vnclipu_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_wv_nxv1i8_nxv1i16_nxv1i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i16>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i8> @intrinsic_vnclipu_mask_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv1i8_nxv1i16_nxv1i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i16> %1,
+    <vscale x 1 x i8> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16.nxv2i8(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i8>,
+  i32);
+
+define <vscale x 2 x i8> @intrinsic_vnclipu_wv_nxv2i8_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_wv_nxv2i8_nxv2i16_nxv2i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16.nxv2i8(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i16>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i8> @intrinsic_vnclipu_mask_wv_nxv2i8_nxv2i16_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv2i8_nxv2i16_nxv2i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i16> %1,
+    <vscale x 2 x i8> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16.nxv4i8(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i8>,
+  i32);
+
+define <vscale x 4 x i8> @intrinsic_vnclipu_wv_nxv4i8_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_wv_nxv4i8_nxv4i16_nxv4i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16.nxv4i8(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i16>,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i8> @intrinsic_vnclipu_mask_wv_nxv4i8_nxv4i16_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv4i8_nxv4i16_nxv4i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i16> %1,
+    <vscale x 4 x i8> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16.nxv8i8(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i8>,
+  i32);
+
+define <vscale x 8 x i8> @intrinsic_vnclipu_wv_nxv8i8_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_wv_nxv8i8_nxv8i16_nxv8i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16.nxv8i8(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i16>,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i8> @intrinsic_vnclipu_mask_wv_nxv8i8_nxv8i16_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv8i8_nxv8i16_nxv8i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i16> %1,
+    <vscale x 8 x i8> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16.nxv16i8(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i8>,
+  i32);
+
+define <vscale x 16 x i8> @intrinsic_vnclipu_wv_nxv16i8_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_wv_nxv16i8_nxv16i16_nxv16i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16.nxv16i8(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.nxv16i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i16>,
+  <vscale x 16 x i8>,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i8> @intrinsic_vnclipu_mask_wv_nxv16i8_nxv16i16_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv16i8_nxv16i16_nxv16i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.nxv16i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i16> %1,
+    <vscale x 16 x i8> %2,
+    <vscale x 16 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16.nxv32i8(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i8>,
+  i32);
+
+define <vscale x 32 x i8> @intrinsic_vnclipu_wv_nxv32i8_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_wv_nxv32i8_nxv32i16_nxv32i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16.nxv32i8(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.nxv32i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i16>,
+  <vscale x 32 x i8>,
+  <vscale x 32 x i1>,
+  i32);
+
+define <vscale x 32 x i8> @intrinsic_vnclipu_mask_wv_nxv32i8_nxv32i16_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv32i8_nxv32i16_nxv32i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.nxv32i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i16> %1,
+    <vscale x 32 x i8> %2,
+    <vscale x 32 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32.nxv1i16(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i16>,
+  i32);
+
+define <vscale x 1 x i16> @intrinsic_vnclipu_wv_nxv1i16_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_wv_nxv1i16_nxv1i32_nxv1i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32.nxv1i16(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i16> %1,
+    i32 %2)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i32>,
+  <vscale x 1 x i16>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i16> @intrinsic_vnclipu_mask_wv_nxv1i16_nxv1i32_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv1i16_nxv1i32_nxv1i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.nxv1i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i32> %1,
+    <vscale x 1 x i16> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32.nxv2i16(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i16>,
+  i32);
+
+define <vscale x 2 x i16> @intrinsic_vnclipu_wv_nxv2i16_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_wv_nxv2i16_nxv2i32_nxv2i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32.nxv2i16(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i16> %1,
+    i32 %2)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.nxv2i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i32>,
+  <vscale x 2 x i16>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i16> @intrinsic_vnclipu_mask_wv_nxv2i16_nxv2i32_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv2i16_nxv2i32_nxv2i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.nxv2i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i32> %1,
+    <vscale x 2 x i16> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32.nxv4i16(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i16>,
+  i32);
+
+define <vscale x 4 x i16> @intrinsic_vnclipu_wv_nxv4i16_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_wv_nxv4i16_nxv4i32_nxv4i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32.nxv4i16(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i16> %1,
+    i32 %2)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.nxv4i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i32>,
+  <vscale x 4 x i16>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i16> @intrinsic_vnclipu_mask_wv_nxv4i16_nxv4i32_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv4i16_nxv4i32_nxv4i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.nxv4i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i32> %1,
+    <vscale x 4 x i16> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32.nxv8i16(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i16>,
+  i32);
+
+define <vscale x 8 x i16> @intrinsic_vnclipu_wv_nxv8i16_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_wv_nxv8i16_nxv8i32_nxv8i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32.nxv8i16(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i16> %1,
+    i32 %2)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.nxv8i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i32>,
+  <vscale x 8 x i16>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i16> @intrinsic_vnclipu_mask_wv_nxv8i16_nxv8i32_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv8i16_nxv8i32_nxv8i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.nxv8i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i32> %1,
+    <vscale x 8 x i16> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32.nxv16i16(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i16>,
+  i32);
+
+define <vscale x 16 x i16> @intrinsic_vnclipu_wv_nxv16i16_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_wv_nxv16i16_nxv16i32_nxv16i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32.nxv16i16(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i16> %1,
+    i32 %2)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.nxv16i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i32>,
+  <vscale x 16 x i16>,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i16> @intrinsic_vnclipu_mask_wv_nxv16i16_nxv16i32_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv16i16_nxv16i32_nxv16i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.nxv16i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i32> %1,
+    <vscale x 16 x i16> %2,
+    <vscale x 16 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16.i8(
+  <vscale x 1 x i16>,
+  i8,
+  i32);
+
+define <vscale x 1 x i8> @intrinsic_vnclipu_wx_nxv1i8_nxv1i16_i8(<vscale x 1 x i16> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_wx_nxv1i8_nxv1i16_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16.i8(
+    <vscale x 1 x i16> %0,
+    i8 %1,
+    i32 %2)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i16>,
+  i8,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i8> @intrinsic_vnclipu_mask_wx_nxv1i8_nxv1i16_i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv1i8_nxv1i16_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i16> %1,
+    i8 %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16.i8(
+  <vscale x 2 x i16>,
+  i8,
+  i32);
+
+define <vscale x 2 x i8> @intrinsic_vnclipu_wx_nxv2i8_nxv2i16_i8(<vscale x 2 x i16> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_wx_nxv2i8_nxv2i16_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16.i8(
+    <vscale x 2 x i16> %0,
+    i8 %1,
+    i32 %2)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i16>,
+  i8,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i8> @intrinsic_vnclipu_mask_wx_nxv2i8_nxv2i16_i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv2i8_nxv2i16_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i16> %1,
+    i8 %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16.i8(
+  <vscale x 4 x i16>,
+  i8,
+  i32);
+
+define <vscale x 4 x i8> @intrinsic_vnclipu_wx_nxv4i8_nxv4i16_i8(<vscale x 4 x i16> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_wx_nxv4i8_nxv4i16_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16.i8(
+    <vscale x 4 x i16> %0,
+    i8 %1,
+    i32 %2)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i16>,
+  i8,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i8> @intrinsic_vnclipu_mask_wx_nxv4i8_nxv4i16_i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv4i8_nxv4i16_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i16> %1,
+    i8 %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16.i8(
+  <vscale x 8 x i16>,
+  i8,
+  i32);
+
+define <vscale x 8 x i8> @intrinsic_vnclipu_wx_nxv8i8_nxv8i16_i8(<vscale x 8 x i16> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_wx_nxv8i8_nxv8i16_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16.i8(
+    <vscale x 8 x i16> %0,
+    i8 %1,
+    i32 %2)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i16>,
+  i8,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i8> @intrinsic_vnclipu_mask_wx_nxv8i8_nxv8i16_i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv8i8_nxv8i16_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i16> %1,
+    i8 %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16.i8(
+  <vscale x 16 x i16>,
+  i8,
+  i32);
+
+define <vscale x 16 x i8> @intrinsic_vnclipu_wx_nxv16i8_nxv16i16_i8(<vscale x 16 x i16> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_wx_nxv16i8_nxv16i16_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16.i8(
+    <vscale x 16 x i16> %0,
+    i8 %1,
+    i32 %2)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i16>,
+  i8,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i8> @intrinsic_vnclipu_mask_wx_nxv16i8_nxv16i16_i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv16i8_nxv16i16_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i16> %1,
+    i8 %2,
+    <vscale x 16 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16.i8(
+  <vscale x 32 x i16>,
+  i8,
+  i32);
+
+define <vscale x 32 x i8> @intrinsic_vnclipu_wx_nxv32i8_nxv32i16_i8(<vscale x 32 x i16> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_wx_nxv32i8_nxv32i16_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16.i8(
+    <vscale x 32 x i16> %0,
+    i8 %1,
+    i32 %2)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i16>,
+  i8,
+  <vscale x 32 x i1>,
+  i32);
+
+define <vscale x 32 x i8> @intrinsic_vnclipu_mask_wx_nxv32i8_nxv32i16_i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv32i8_nxv32i16_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i16> %1,
+    i8 %2,
+    <vscale x 32 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32.i16(
+  <vscale x 1 x i32>,
+  i16,
+  i32);
+
+define <vscale x 1 x i16> @intrinsic_vnclipu_wx_nxv1i16_nxv1i32_i16(<vscale x 1 x i32> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_wx_nxv1i16_nxv1i32_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32.i16(
+    <vscale x 1 x i32> %0,
+    i16 %1,
+    i32 %2)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i32>,
+  i16,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i16> @intrinsic_vnclipu_mask_wx_nxv1i16_nxv1i32_i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv1i16_nxv1i32_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i32> %1,
+    i16 %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32.i16(
+  <vscale x 2 x i32>,
+  i16,
+  i32);
+
+define <vscale x 2 x i16> @intrinsic_vnclipu_wx_nxv2i16_nxv2i32_i16(<vscale x 2 x i32> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_wx_nxv2i16_nxv2i32_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32.i16(
+    <vscale x 2 x i32> %0,
+    i16 %1,
+    i32 %2)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i32>,
+  i16,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i16> @intrinsic_vnclipu_mask_wx_nxv2i16_nxv2i32_i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv2i16_nxv2i32_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i32> %1,
+    i16 %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32.i16(
+  <vscale x 4 x i32>,
+  i16,
+  i32);
+
+define <vscale x 4 x i16> @intrinsic_vnclipu_wx_nxv4i16_nxv4i32_i16(<vscale x 4 x i32> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_wx_nxv4i16_nxv4i32_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32.i16(
+    <vscale x 4 x i32> %0,
+    i16 %1,
+    i32 %2)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i32>,
+  i16,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i16> @intrinsic_vnclipu_mask_wx_nxv4i16_nxv4i32_i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv4i16_nxv4i32_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i32> %1,
+    i16 %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32.i16(
+  <vscale x 8 x i32>,
+  i16,
+  i32);
+
+define <vscale x 8 x i16> @intrinsic_vnclipu_wx_nxv8i16_nxv8i32_i16(<vscale x 8 x i32> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_wx_nxv8i16_nxv8i32_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32.i16(
+    <vscale x 8 x i32> %0,
+    i16 %1,
+    i32 %2)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i32>,
+  i16,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i16> @intrinsic_vnclipu_mask_wx_nxv8i16_nxv8i32_i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv8i16_nxv8i32_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i32> %1,
+    i16 %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32.i16(
+  <vscale x 16 x i32>,
+  i16,
+  i32);
+
+define <vscale x 16 x i16> @intrinsic_vnclipu_wx_nxv16i16_nxv16i32_i16(<vscale x 16 x i32> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_wx_nxv16i16_nxv16i32_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32.i16(
+    <vscale x 16 x i32> %0,
+    i16 %1,
+    i32 %2)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i32>,
+  i16,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i16> @intrinsic_vnclipu_mask_wx_nxv16i16_nxv16i32_i16(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv16i16_nxv16i32_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i32> %1,
+    i16 %2,
+    <vscale x 16 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 1 x i8> @intrinsic_vnclipu_wi_nxv1i8_nxv1i16_i8(<vscale x 1 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_wi_nxv1i8_nxv1i16_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16.i8(
+    <vscale x 1 x i16> %0,
+    i8 9,
+    i32 %1)
+
+  ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 1 x i8> @intrinsic_vnclipu_mask_wi_nxv1i8_nxv1i16_i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv1i8_nxv1i16_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i16> %1,
+    i8 9,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_vnclipu_wi_nxv2i8_nxv2i16_i8(<vscale x 2 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_wi_nxv2i8_nxv2i16_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16.i8(
+    <vscale x 2 x i16> %0,
+    i8 9,
+    i32 %1)
+
+  ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_vnclipu_mask_wi_nxv2i8_nxv2i16_i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv2i8_nxv2i16_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i16> %1,
+    i8 9,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_vnclipu_wi_nxv4i8_nxv4i16_i8(<vscale x 4 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_wi_nxv4i8_nxv4i16_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16.i8(
+    <vscale x 4 x i16> %0,
+    i8 9,
+    i32 %1)
+
+  ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_vnclipu_mask_wi_nxv4i8_nxv4i16_i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv4i8_nxv4i16_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i16> %1,
+    i8 9,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vnclipu_wi_nxv8i8_nxv8i16_i8(<vscale x 8 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_wi_nxv8i8_nxv8i16_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16.i8(
+    <vscale x 8 x i16> %0,
+    i8 9,
+    i32 %1)
+
+  ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vnclipu_mask_wi_nxv8i8_nxv8i16_i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv8i8_nxv8i16_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i16> %1,
+    i8 9,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_vnclipu_wi_nxv16i8_nxv16i16_i8(<vscale x 16 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_wi_nxv16i8_nxv16i16_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16.i8(
+    <vscale x 16 x i16> %0,
+    i8 9,
+    i32 %1)
+
+  ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_vnclipu_mask_wi_nxv16i8_nxv16i16_i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv16i8_nxv16i16_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i16> %1,
+    i8 9,
+    <vscale x 16 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 32 x i8> @intrinsic_vnclipu_wi_nxv32i8_nxv32i16_i8(<vscale x 32 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_wi_nxv32i8_nxv32i16_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16.i8(
+    <vscale x 32 x i16> %0,
+    i8 9,
+    i32 %1)
+
+  ret <vscale x 32 x i8> %a
+}
+
+define <vscale x 32 x i8> @intrinsic_vnclipu_mask_wi_nxv32i8_nxv32i16_i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv32i8_nxv32i16_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i16> %1,
+    i8 9,
+    <vscale x 32 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 32 x i8> %a
+}
+
+define <vscale x 1 x i16> @intrinsic_vnclipu_wi_nxv1i16_nxv1i32_i16(<vscale x 1 x i32> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_wi_nxv1i16_nxv1i32_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32.i16(
+    <vscale x 1 x i32> %0,
+    i16 9,
+    i32 %1)
+
+  ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 1 x i16> @intrinsic_vnclipu_mask_wi_nxv1i16_nxv1i32_i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv1i16_nxv1i32_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i32> %1,
+    i16 9,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 2 x i16> @intrinsic_vnclipu_wi_nxv2i16_nxv2i32_i16(<vscale x 2 x i32> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_wi_nxv2i16_nxv2i32_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32.i16(
+    <vscale x 2 x i32> %0,
+    i16 9,
+    i32 %1)
+
+  ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 2 x i16> @intrinsic_vnclipu_mask_wi_nxv2i16_nxv2i32_i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv2i16_nxv2i32_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i32> %1,
+    i16 9,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 4 x i16> @intrinsic_vnclipu_wi_nxv4i16_nxv4i32_i16(<vscale x 4 x i32> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_wi_nxv4i16_nxv4i32_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32.i16(
+    <vscale x 4 x i32> %0,
+    i16 9,
+    i32 %1)
+
+  ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 4 x i16> @intrinsic_vnclipu_mask_wi_nxv4i16_nxv4i32_i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv4i16_nxv4i32_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i32> %1,
+    i16 9,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 8 x i16> @intrinsic_vnclipu_wi_nxv8i16_nxv8i32_i16(<vscale x 8 x i32> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_wi_nxv8i16_nxv8i32_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32.i16(
+    <vscale x 8 x i32> %0,
+    i16 9,
+    i32 %1)
+
+  ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 8 x i16> @intrinsic_vnclipu_mask_wi_nxv8i16_nxv8i32_i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv8i16_nxv8i32_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i32> %1,
+    i16 9,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 16 x i16> @intrinsic_vnclipu_wi_nxv16i16_nxv16i32_i16(<vscale x 16 x i32> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_wi_nxv16i16_nxv16i32_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32.i16(
+    <vscale x 16 x i32> %0,
+    i16 9,
+    i32 %1)
+
+  ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 16 x i16> @intrinsic_vnclipu_mask_wi_nxv16i16_nxv16i32_i16(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv16i16_nxv16i32_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i32> %1,
+    i16 9,
+    <vscale x 16 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 16 x i16> %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv64.ll
new file mode 100644
index 000000000000..fe635213e06f
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv64.ll
@@ -0,0 +1,1621 @@
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i8>,
+  i64);
+
+define <vscale x 1 x i8> @intrinsic_vnclipu_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_wv_nxv1i8_nxv1i16_nxv1i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i16>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i8> @intrinsic_vnclipu_mask_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv1i8_nxv1i16_nxv1i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i16> %1,
+    <vscale x 1 x i8> %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16.nxv2i8(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i8>,
+  i64);
+
+define <vscale x 2 x i8> @intrinsic_vnclipu_wv_nxv2i8_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_wv_nxv2i8_nxv2i16_nxv2i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16.nxv2i8(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i16>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i8> @intrinsic_vnclipu_mask_wv_nxv2i8_nxv2i16_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv2i8_nxv2i16_nxv2i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i16> %1,
+    <vscale x 2 x i8> %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16.nxv4i8(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i8>,
+  i64);
+
+define <vscale x 4 x i8> @intrinsic_vnclipu_wv_nxv4i8_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_wv_nxv4i8_nxv4i16_nxv4i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16.nxv4i8(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i16>,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i8> @intrinsic_vnclipu_mask_wv_nxv4i8_nxv4i16_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv4i8_nxv4i16_nxv4i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i16> %1,
+    <vscale x 4 x i8> %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16.nxv8i8(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i8>,
+  i64);
+
+define <vscale x 8 x i8> @intrinsic_vnclipu_wv_nxv8i8_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_wv_nxv8i8_nxv8i16_nxv8i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16.nxv8i8(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i16>,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i8> @intrinsic_vnclipu_mask_wv_nxv8i8_nxv8i16_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv8i8_nxv8i16_nxv8i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i16> %1,
+    <vscale x 8 x i8> %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16.nxv16i8(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i8>,
+  i64);
+
+define <vscale x 16 x i8> @intrinsic_vnclipu_wv_nxv16i8_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_wv_nxv16i8_nxv16i16_nxv16i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16.nxv16i8(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.nxv16i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i16>,
+  <vscale x 16 x i8>,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i8> @intrinsic_vnclipu_mask_wv_nxv16i8_nxv16i16_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv16i8_nxv16i16_nxv16i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.nxv16i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i16> %1,
+    <vscale x 16 x i8> %2,
+    <vscale x 16 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16.nxv32i8(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i8>,
+  i64);
+
+define <vscale x 32 x i8> @intrinsic_vnclipu_wv_nxv32i8_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_wv_nxv32i8_nxv32i16_nxv32i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16.nxv32i8(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.nxv32i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i16>,
+  <vscale x 32 x i8>,
+  <vscale x 32 x i1>,
+  i64);
+
+define <vscale x 32 x i8> @intrinsic_vnclipu_mask_wv_nxv32i8_nxv32i16_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv32i8_nxv32i16_nxv32i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.nxv32i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i16> %1,
+    <vscale x 32 x i8> %2,
+    <vscale x 32 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32.nxv1i16(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i16>,
+  i64);
+
+define <vscale x 1 x i16> @intrinsic_vnclipu_wv_nxv1i16_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_wv_nxv1i16_nxv1i32_nxv1i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32.nxv1i16(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i16> %1,
+    i64 %2)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i32>,
+  <vscale x 1 x i16>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i16> @intrinsic_vnclipu_mask_wv_nxv1i16_nxv1i32_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv1i16_nxv1i32_nxv1i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.nxv1i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i32> %1,
+    <vscale x 1 x i16> %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32.nxv2i16(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i16>,
+  i64);
+
+define <vscale x 2 x i16> @intrinsic_vnclipu_wv_nxv2i16_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_wv_nxv2i16_nxv2i32_nxv2i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32.nxv2i16(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i16> %1,
+    i64 %2)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.nxv2i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i32>,
+  <vscale x 2 x i16>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i16> @intrinsic_vnclipu_mask_wv_nxv2i16_nxv2i32_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv2i16_nxv2i32_nxv2i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.nxv2i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i32> %1,
+    <vscale x 2 x i16> %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32.nxv4i16(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i16>,
+  i64);
+
+define <vscale x 4 x i16> @intrinsic_vnclipu_wv_nxv4i16_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_wv_nxv4i16_nxv4i32_nxv4i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32.nxv4i16(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i16> %1,
+    i64 %2)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.nxv4i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i32>,
+  <vscale x 4 x i16>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i16> @intrinsic_vnclipu_mask_wv_nxv4i16_nxv4i32_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv4i16_nxv4i32_nxv4i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.nxv4i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i32> %1,
+    <vscale x 4 x i16> %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32.nxv8i16(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i16>,
+  i64);
+
+define <vscale x 8 x i16> @intrinsic_vnclipu_wv_nxv8i16_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_wv_nxv8i16_nxv8i32_nxv8i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32.nxv8i16(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i16> %1,
+    i64 %2)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.nxv8i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i32>,
+  <vscale x 8 x i16>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i16> @intrinsic_vnclipu_mask_wv_nxv8i16_nxv8i32_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv8i16_nxv8i32_nxv8i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.nxv8i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i32> %1,
+    <vscale x 8 x i16> %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32.nxv16i16(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i16>,
+  i64);
+
+define <vscale x 16 x i16> @intrinsic_vnclipu_wv_nxv16i16_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_wv_nxv16i16_nxv16i32_nxv16i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32.nxv16i16(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i16> %1,
+    i64 %2)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.nxv16i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i32>,
+  <vscale x 16 x i16>,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i16> @intrinsic_vnclipu_mask_wv_nxv16i16_nxv16i32_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv16i16_nxv16i32_nxv16i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.nxv16i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i32> %1,
+    <vscale x 16 x i16> %2,
+    <vscale x 16 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64.nxv1i32(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i32>,
+  i64);
+
+define <vscale x 1 x i32> @intrinsic_vnclipu_wv_nxv1i32_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_wv_nxv1i32_nxv1i64_nxv1i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64.nxv1i32(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i32> %1,
+    i64 %2)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i64>,
+  <vscale x 1 x i32>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i32> @intrinsic_vnclipu_mask_wv_nxv1i32_nxv1i64_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv1i32_nxv1i64_nxv1i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.nxv1i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i64> %1,
+    <vscale x 1 x i32> %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64.nxv2i32(
+  <vscale x 2 x i64>,
+  <vscale x 2 x i32>,
+  i64);
+
+define <vscale x 2 x i32> @intrinsic_vnclipu_wv_nxv2i32_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_wv_nxv2i32_nxv2i64_nxv2i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64.nxv2i32(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i32> %1,
+    i64 %2)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.nxv2i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i64>,
+  <vscale x 2 x i32>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i32> @intrinsic_vnclipu_mask_wv_nxv2i32_nxv2i64_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv2i32_nxv2i64_nxv2i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i32> @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.nxv2i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i64> %1,
+    <vscale x 2 x i32> %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64.nxv4i32(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i32>,
+  i64);
+
+define <vscale x 4 x i32> @intrinsic_vnclipu_wv_nxv4i32_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_wv_nxv4i32_nxv4i64_nxv4i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64.nxv4i32(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i32> %1,
+    i64 %2)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i64>,
+  <vscale x 4 x i32>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i32> @intrinsic_vnclipu_mask_wv_nxv4i32_nxv4i64_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv4i32_nxv4i64_nxv4i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i32> @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.nxv4i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i64> %1,
+    <vscale x 4 x i32> %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64.nxv8i32(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i32>,
+  i64);
+
+define <vscale x 8 x i32> @intrinsic_vnclipu_wv_nxv8i32_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_wv_nxv8i32_nxv8i64_nxv8i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64.nxv8i32(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i32> %1,
+    i64 %2)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i64>,
+  <vscale x 8 x i32>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i32> @intrinsic_vnclipu_mask_wv_nxv8i32_nxv8i64_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv8i32_nxv8i64_nxv8i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vnclipu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i32> @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.nxv8i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i64> %1,
+    <vscale x 8 x i32> %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16.i8(
+  <vscale x 1 x i16>,
+  i8,
+  i64);
+
+define <vscale x 1 x i8> @intrinsic_vnclipu_wx_nxv1i8_nxv1i16_i8(<vscale x 1 x i16> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_wx_nxv1i8_nxv1i16_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16.i8(
+    <vscale x 1 x i16> %0,
+    i8 %1,
+    i64 %2)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i16>,
+  i8,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i8> @intrinsic_vnclipu_mask_wx_nxv1i8_nxv1i16_i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv1i8_nxv1i16_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i16> %1,
+    i8 %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16.i8(
+  <vscale x 2 x i16>,
+  i8,
+  i64);
+
+define <vscale x 2 x i8> @intrinsic_vnclipu_wx_nxv2i8_nxv2i16_i8(<vscale x 2 x i16> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_wx_nxv2i8_nxv2i16_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16.i8(
+    <vscale x 2 x i16> %0,
+    i8 %1,
+    i64 %2)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i16>,
+  i8,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i8> @intrinsic_vnclipu_mask_wx_nxv2i8_nxv2i16_i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv2i8_nxv2i16_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i16> %1,
+    i8 %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16.i8(
+  <vscale x 4 x i16>,
+  i8,
+  i64);
+
+define <vscale x 4 x i8> @intrinsic_vnclipu_wx_nxv4i8_nxv4i16_i8(<vscale x 4 x i16> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_wx_nxv4i8_nxv4i16_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16.i8(
+    <vscale x 4 x i16> %0,
+    i8 %1,
+    i64 %2)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i16>,
+  i8,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i8> @intrinsic_vnclipu_mask_wx_nxv4i8_nxv4i16_i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv4i8_nxv4i16_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i16> %1,
+    i8 %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16.i8(
+  <vscale x 8 x i16>,
+  i8,
+  i64);
+
+define <vscale x 8 x i8> @intrinsic_vnclipu_wx_nxv8i8_nxv8i16_i8(<vscale x 8 x i16> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_wx_nxv8i8_nxv8i16_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16.i8(
+    <vscale x 8 x i16> %0,
+    i8 %1,
+    i64 %2)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i16>,
+  i8,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i8> @intrinsic_vnclipu_mask_wx_nxv8i8_nxv8i16_i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv8i8_nxv8i16_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i16> %1,
+    i8 %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16.i8(
+  <vscale x 16 x i16>,
+  i8,
+  i64);
+
+define <vscale x 16 x i8> @intrinsic_vnclipu_wx_nxv16i8_nxv16i16_i8(<vscale x 16 x i16> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_wx_nxv16i8_nxv16i16_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16.i8(
+    <vscale x 16 x i16> %0,
+    i8 %1,
+    i64 %2)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i16>,
+  i8,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i8> @intrinsic_vnclipu_mask_wx_nxv16i8_nxv16i16_i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv16i8_nxv16i16_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i16> %1,
+    i8 %2,
+    <vscale x 16 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16.i8(
+  <vscale x 32 x i16>,
+  i8,
+  i64);
+
+define <vscale x 32 x i8> @intrinsic_vnclipu_wx_nxv32i8_nxv32i16_i8(<vscale x 32 x i16> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_wx_nxv32i8_nxv32i16_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16.i8(
+    <vscale x 32 x i16> %0,
+    i8 %1,
+    i64 %2)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i16>,
+  i8,
+  <vscale x 32 x i1>,
+  i64);
+
+define <vscale x 32 x i8> @intrinsic_vnclipu_mask_wx_nxv32i8_nxv32i16_i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv32i8_nxv32i16_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i16> %1,
+    i8 %2,
+    <vscale x 32 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32.i16(
+  <vscale x 1 x i32>,
+  i16,
+  i64);
+
+define <vscale x 1 x i16> @intrinsic_vnclipu_wx_nxv1i16_nxv1i32_i16(<vscale x 1 x i32> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_wx_nxv1i16_nxv1i32_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32.i16(
+    <vscale x 1 x i32> %0,
+    i16 %1,
+    i64 %2)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i32>,
+  i16,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i16> @intrinsic_vnclipu_mask_wx_nxv1i16_nxv1i32_i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv1i16_nxv1i32_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i32> %1,
+    i16 %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32.i16(
+  <vscale x 2 x i32>,
+  i16,
+  i64);
+
+define <vscale x 2 x i16> @intrinsic_vnclipu_wx_nxv2i16_nxv2i32_i16(<vscale x 2 x i32> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_wx_nxv2i16_nxv2i32_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32.i16(
+    <vscale x 2 x i32> %0,
+    i16 %1,
+    i64 %2)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i32>,
+  i16,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i16> @intrinsic_vnclipu_mask_wx_nxv2i16_nxv2i32_i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv2i16_nxv2i32_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i32> %1,
+    i16 %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32.i16(
+  <vscale x 4 x i32>,
+  i16,
+  i64);
+
+define <vscale x 4 x i16> @intrinsic_vnclipu_wx_nxv4i16_nxv4i32_i16(<vscale x 4 x i32> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_wx_nxv4i16_nxv4i32_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32.i16(
+    <vscale x 4 x i32> %0,
+    i16 %1,
+    i64 %2)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i32>,
+  i16,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i16> @intrinsic_vnclipu_mask_wx_nxv4i16_nxv4i32_i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv4i16_nxv4i32_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i32> %1,
+    i16 %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32.i16(
+  <vscale x 8 x i32>,
+  i16,
+  i64);
+
+define <vscale x 8 x i16> @intrinsic_vnclipu_wx_nxv8i16_nxv8i32_i16(<vscale x 8 x i32> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_wx_nxv8i16_nxv8i32_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32.i16(
+    <vscale x 8 x i32> %0,
+    i16 %1,
+    i64 %2)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i32>,
+  i16,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i16> @intrinsic_vnclipu_mask_wx_nxv8i16_nxv8i32_i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv8i16_nxv8i32_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i32> %1,
+    i16 %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32.i16(
+  <vscale x 16 x i32>,
+  i16,
+  i64);
+
+define <vscale x 16 x i16> @intrinsic_vnclipu_wx_nxv16i16_nxv16i32_i16(<vscale x 16 x i32> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_wx_nxv16i16_nxv16i32_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32.i16(
+    <vscale x 16 x i32> %0,
+    i16 %1,
+    i64 %2)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i32>,
+  i16,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i16> @intrinsic_vnclipu_mask_wx_nxv16i16_nxv16i32_i16(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv16i16_nxv16i32_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i32> %1,
+    i16 %2,
+    <vscale x 16 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64.i32(
+  <vscale x 1 x i64>,
+  i32,
+  i64);
+
+define <vscale x 1 x i32> @intrinsic_vnclipu_wx_nxv1i32_nxv1i64_i32(<vscale x 1 x i64> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_wx_nxv1i32_nxv1i64_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64.i32(
+    <vscale x 1 x i64> %0,
+    i32 %1,
+    i64 %2)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i64>,
+  i32,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i32> @intrinsic_vnclipu_mask_wx_nxv1i32_nxv1i64_i32(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv1i32_nxv1i64_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i64> %1,
+    i32 %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64.i32(
+  <vscale x 2 x i64>,
+  i32,
+  i64);
+
+define <vscale x 2 x i32> @intrinsic_vnclipu_wx_nxv2i32_nxv2i64_i32(<vscale x 2 x i64> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_wx_nxv2i32_nxv2i64_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64.i32(
+    <vscale x 2 x i64> %0,
+    i32 %1,
+    i64 %2)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i64>,
+  i32,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i32> @intrinsic_vnclipu_mask_wx_nxv2i32_nxv2i64_i32(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv2i32_nxv2i64_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i32> @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i64> %1,
+    i32 %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64.i32(
+  <vscale x 4 x i64>,
+  i32,
+  i64);
+
+define <vscale x 4 x i32> @intrinsic_vnclipu_wx_nxv4i32_nxv4i64_i32(<vscale x 4 x i64> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_wx_nxv4i32_nxv4i64_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64.i32(
+    <vscale x 4 x i64> %0,
+    i32 %1,
+    i64 %2)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i64>,
+  i32,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i32> @intrinsic_vnclipu_mask_wx_nxv4i32_nxv4i64_i32(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv4i32_nxv4i64_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i32> @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i64> %1,
+    i32 %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64.i32(
+  <vscale x 8 x i64>,
+  i32,
+  i64);
+
+define <vscale x 8 x i32> @intrinsic_vnclipu_wx_nxv8i32_nxv8i64_i32(<vscale x 8 x i64> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_wx_nxv8i32_nxv8i64_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64.i32(
+    <vscale x 8 x i64> %0,
+    i32 %1,
+    i64 %2)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i64>,
+  i32,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i32> @intrinsic_vnclipu_mask_wx_nxv8i32_nxv8i64_i32(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_mask_wx_nxv8i32_nxv8i64_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vnclipu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i32> @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i64> %1,
+    i32 %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 1 x i8> @intrinsic_vnclipu_wi_nxv1i8_nxv1i16_i8(<vscale x 1 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_wi_nxv1i8_nxv1i16_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16.i8(
+    <vscale x 1 x i16> %0,
+    i8 9,
+    i64 %1)
+
+  ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 1 x i8> @intrinsic_vnclipu_mask_wi_nxv1i8_nxv1i16_i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv1i8_nxv1i16_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i16> %1,
+    i8 9,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_vnclipu_wi_nxv2i8_nxv2i16_i8(<vscale x 2 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_wi_nxv2i8_nxv2i16_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16.i8(
+    <vscale x 2 x i16> %0,
+    i8 9,
+    i64 %1)
+
+  ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_vnclipu_mask_wi_nxv2i8_nxv2i16_i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv2i8_nxv2i16_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i16> %1,
+    i8 9,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_vnclipu_wi_nxv4i8_nxv4i16_i8(<vscale x 4 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_wi_nxv4i8_nxv4i16_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16.i8(
+    <vscale x 4 x i16> %0,
+    i8 9,
+    i64 %1)
+
+  ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_vnclipu_mask_wi_nxv4i8_nxv4i16_i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv4i8_nxv4i16_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i16> %1,
+    i8 9,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vnclipu_wi_nxv8i8_nxv8i16_i8(<vscale x 8 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_wi_nxv8i8_nxv8i16_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16.i8(
+    <vscale x 8 x i16> %0,
+    i8 9,
+    i64 %1)
+
+  ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vnclipu_mask_wi_nxv8i8_nxv8i16_i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv8i8_nxv8i16_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i16> %1,
+    i8 9,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_vnclipu_wi_nxv16i8_nxv16i16_i8(<vscale x 16 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_wi_nxv16i8_nxv16i16_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16.i8(
+    <vscale x 16 x i16> %0,
+    i8 9,
+    i64 %1)
+
+  ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_vnclipu_mask_wi_nxv16i8_nxv16i16_i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv16i8_nxv16i16_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i16> %1,
+    i8 9,
+    <vscale x 16 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 32 x i8> @intrinsic_vnclipu_wi_nxv32i8_nxv32i16_i8(<vscale x 32 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_wi_nxv32i8_nxv32i16_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16.i8(
+    <vscale x 32 x i16> %0,
+    i8 9,
+    i64 %1)
+
+  ret <vscale x 32 x i8> %a
+}
+
+define <vscale x 32 x i8> @intrinsic_vnclipu_mask_wi_nxv32i8_nxv32i16_i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv32i8_nxv32i16_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i16> %1,
+    i8 9,
+    <vscale x 32 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 32 x i8> %a
+}
+
+define <vscale x 1 x i16> @intrinsic_vnclipu_wi_nxv1i16_nxv1i32_i16(<vscale x 1 x i32> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_wi_nxv1i16_nxv1i32_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32.i16(
+    <vscale x 1 x i32> %0,
+    i16 9,
+    i64 %1)
+
+  ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 1 x i16> @intrinsic_vnclipu_mask_wi_nxv1i16_nxv1i32_i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv1i16_nxv1i32_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i32> %1,
+    i16 9,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 2 x i16> @intrinsic_vnclipu_wi_nxv2i16_nxv2i32_i16(<vscale x 2 x i32> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_wi_nxv2i16_nxv2i32_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32.i16(
+    <vscale x 2 x i32> %0,
+    i16 9,
+    i64 %1)
+
+  ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 2 x i16> @intrinsic_vnclipu_mask_wi_nxv2i16_nxv2i32_i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv2i16_nxv2i32_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i32> %1,
+    i16 9,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 4 x i16> @intrinsic_vnclipu_wi_nxv4i16_nxv4i32_i16(<vscale x 4 x i32> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_wi_nxv4i16_nxv4i32_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32.i16(
+    <vscale x 4 x i32> %0,
+    i16 9,
+    i64 %1)
+
+  ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 4 x i16> @intrinsic_vnclipu_mask_wi_nxv4i16_nxv4i32_i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv4i16_nxv4i32_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i32> %1,
+    i16 9,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 8 x i16> @intrinsic_vnclipu_wi_nxv8i16_nxv8i32_i16(<vscale x 8 x i32> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_wi_nxv8i16_nxv8i32_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32.i16(
+    <vscale x 8 x i32> %0,
+    i16 9,
+    i64 %1)
+
+  ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 8 x i16> @intrinsic_vnclipu_mask_wi_nxv8i16_nxv8i32_i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv8i16_nxv8i32_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i32> %1,
+    i16 9,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 16 x i16> @intrinsic_vnclipu_wi_nxv16i16_nxv16i32_i16(<vscale x 16 x i32> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_wi_nxv16i16_nxv16i32_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32.i16(
+    <vscale x 16 x i32> %0,
+    i16 9,
+    i64 %1)
+
+  ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 16 x i16> @intrinsic_vnclipu_mask_wi_nxv16i16_nxv16i32_i16(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv16i16_nxv16i32_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i32> %1,
+    i16 9,
+    <vscale x 16 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 1 x i32> @intrinsic_vnclipu_wi_nxv1i32_nxv1i64_i32(<vscale x 1 x i64> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_wi_nxv1i32_nxv1i64_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64.i32(
+    <vscale x 1 x i64> %0,
+    i32 9,
+    i64 %1)
+
+  ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 1 x i32> @intrinsic_vnclipu_mask_wi_nxv1i32_nxv1i64_i32(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv1i32_nxv1i64_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i64> %1,
+    i32 9,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 2 x i32> @intrinsic_vnclipu_wi_nxv2i32_nxv2i64_i32(<vscale x 2 x i64> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_wi_nxv2i32_nxv2i64_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64.i32(
+    <vscale x 2 x i64> %0,
+    i32 9,
+    i64 %1)
+
+  ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 2 x i32> @intrinsic_vnclipu_mask_wi_nxv2i32_nxv2i64_i32(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv2i32_nxv2i64_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 2 x i32> @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i64> %1,
+    i32 9,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 4 x i32> @intrinsic_vnclipu_wi_nxv4i32_nxv4i64_i32(<vscale x 4 x i64> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_wi_nxv4i32_nxv4i64_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64.i32(
+    <vscale x 4 x i64> %0,
+    i32 9,
+    i64 %1)
+
+  ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 4 x i32> @intrinsic_vnclipu_mask_wi_nxv4i32_nxv4i64_i32(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv4i32_nxv4i64_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 4 x i32> @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i64> %1,
+    i32 9,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 8 x i32> @intrinsic_vnclipu_wi_nxv8i32_nxv8i64_i32(<vscale x 8 x i64> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_wi_nxv8i32_nxv8i64_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64.i32(
+    <vscale x 8 x i64> %0,
+    i32 9,
+    i64 %1)
+
+  ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 8 x i32> @intrinsic_vnclipu_mask_wi_nxv8i32_nxv8i64_i32(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vnclipu_mask_wi_nxv8i32_nxv8i64_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vnclipu.wi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 8 x i32> @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i64> %1,
+    i32 9,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i32> %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vsmul-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsmul-rv32.ll
new file mode 100644
index 000000000000..fb9fc7c90876
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vsmul-rv32.ll
@@ -0,0 +1,1441 @@
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  i32);
+
+define <vscale x 1 x i8> @intrinsic_vsmul_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_vv_nxv1i8_nxv1i8_nxv1i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i8> @intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    <vscale x 1 x i8> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vsmul.nxv2i8.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  i32);
+
+define <vscale x 2 x i8> @intrinsic_vsmul_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_vv_nxv2i8_nxv2i8_nxv2i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i8> @llvm.riscv.vsmul.nxv2i8.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vsmul.mask.nxv2i8.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i8> @intrinsic_vsmul_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv2i8_nxv2i8_nxv2i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i8> @llvm.riscv.vsmul.mask.nxv2i8.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    <vscale x 2 x i8> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vsmul.nxv4i8.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  i32);
+
+define <vscale x 4 x i8> @intrinsic_vsmul_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_vv_nxv4i8_nxv4i8_nxv4i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i8> @llvm.riscv.vsmul.nxv4i8.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vsmul.mask.nxv4i8.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i8> @intrinsic_vsmul_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv4i8_nxv4i8_nxv4i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i8> @llvm.riscv.vsmul.mask.nxv4i8.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    <vscale x 4 x i8> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vsmul.nxv8i8.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  i32);
+
+define <vscale x 8 x i8> @intrinsic_vsmul_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_vv_nxv8i8_nxv8i8_nxv8i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i8> @llvm.riscv.vsmul.nxv8i8.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vsmul.mask.nxv8i8.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i8> @intrinsic_vsmul_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv8i8_nxv8i8_nxv8i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i8> @llvm.riscv.vsmul.mask.nxv8i8.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    <vscale x 8 x i8> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vsmul.nxv16i8.nxv16i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  i32);
+
+define <vscale x 16 x i8> @intrinsic_vsmul_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_vv_nxv16i8_nxv16i8_nxv16i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i8> @llvm.riscv.vsmul.nxv16i8.nxv16i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vsmul.mask.nxv16i8.nxv16i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i8> @intrinsic_vsmul_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv16i8_nxv16i8_nxv16i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i8> @llvm.riscv.vsmul.mask.nxv16i8.nxv16i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    <vscale x 16 x i8> %2,
+    <vscale x 16 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vsmul.nxv32i8.nxv32i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  i32);
+
+define <vscale x 32 x i8> @intrinsic_vsmul_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_vv_nxv32i8_nxv32i8_nxv32i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 32 x i8> @llvm.riscv.vsmul.nxv32i8.nxv32i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vsmul.mask.nxv32i8.nxv32i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  <vscale x 32 x i1>,
+  i32);
+
+define <vscale x 32 x i8> @intrinsic_vsmul_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv32i8_nxv32i8_nxv32i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 32 x i8> @llvm.riscv.vsmul.mask.nxv32i8.nxv32i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8> %1,
+    <vscale x 32 x i8> %2,
+    <vscale x 32 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vsmul.nxv64i8.nxv64i8(
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  i32);
+
+define <vscale x 64 x i8> @intrinsic_vsmul_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_vv_nxv64i8_nxv64i8_nxv64i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 64 x i8> @llvm.riscv.vsmul.nxv64i8.nxv64i8(
+    <vscale x 64 x i8> %0,
+    <vscale x 64 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vsmul.mask.nxv64i8.nxv64i8(
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  <vscale x 64 x i1>,
+  i32);
+
+define <vscale x 64 x i8> @intrinsic_vsmul_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv64i8_nxv64i8_nxv64i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 64 x i8> @llvm.riscv.vsmul.mask.nxv64i8.nxv64i8(
+    <vscale x 64 x i8> %0,
+    <vscale x 64 x i8> %1,
+    <vscale x 64 x i8> %2,
+    <vscale x 64 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vsmul.nxv1i16.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  i32);
+
+define <vscale x 1 x i16> @intrinsic_vsmul_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_vv_nxv1i16_nxv1i16_nxv1i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i16> @llvm.riscv.vsmul.nxv1i16.nxv1i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    i32 %2)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vsmul.mask.nxv1i16.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i16> @intrinsic_vsmul_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i16_nxv1i16_nxv1i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i16> @llvm.riscv.vsmul.mask.nxv1i16.nxv1i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    <vscale x 1 x i16> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vsmul.nxv2i16.nxv2i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  i32);
+
+define <vscale x 2 x i16> @intrinsic_vsmul_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_vv_nxv2i16_nxv2i16_nxv2i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i16> @llvm.riscv.vsmul.nxv2i16.nxv2i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %1,
+    i32 %2)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vsmul.mask.nxv2i16.nxv2i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i16> @intrinsic_vsmul_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv2i16_nxv2i16_nxv2i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i16> @llvm.riscv.vsmul.mask.nxv2i16.nxv2i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %1,
+    <vscale x 2 x i16> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vsmul.nxv4i16.nxv4i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  i32);
+
+define <vscale x 4 x i16> @intrinsic_vsmul_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_vv_nxv4i16_nxv4i16_nxv4i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i16> @llvm.riscv.vsmul.nxv4i16.nxv4i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %1,
+    i32 %2)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vsmul.mask.nxv4i16.nxv4i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i16> @intrinsic_vsmul_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv4i16_nxv4i16_nxv4i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i16> @llvm.riscv.vsmul.mask.nxv4i16.nxv4i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %1,
+    <vscale x 4 x i16> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vsmul.nxv8i16.nxv8i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  i32);
+
+define <vscale x 8 x i16> @intrinsic_vsmul_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_vv_nxv8i16_nxv8i16_nxv8i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i16> @llvm.riscv.vsmul.nxv8i16.nxv8i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %1,
+    i32 %2)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vsmul.mask.nxv8i16.nxv8i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i16> @intrinsic_vsmul_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv8i16_nxv8i16_nxv8i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i16> @llvm.riscv.vsmul.mask.nxv8i16.nxv8i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %1,
+    <vscale x 8 x i16> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vsmul.nxv16i16.nxv16i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  i32);
+
+define <vscale x 16 x i16> @intrinsic_vsmul_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_vv_nxv16i16_nxv16i16_nxv16i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i16> @llvm.riscv.vsmul.nxv16i16.nxv16i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %1,
+    i32 %2)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vsmul.mask.nxv16i16.nxv16i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i16> @intrinsic_vsmul_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv16i16_nxv16i16_nxv16i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i16> @llvm.riscv.vsmul.mask.nxv16i16.nxv16i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %1,
+    <vscale x 16 x i16> %2,
+    <vscale x 16 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vsmul.nxv32i16.nxv32i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  i32);
+
+define <vscale x 32 x i16> @intrinsic_vsmul_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_vv_nxv32i16_nxv32i16_nxv32i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 32 x i16> @llvm.riscv.vsmul.nxv32i16.nxv32i16(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16> %1,
+    i32 %2)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vsmul.mask.nxv32i16.nxv32i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  <vscale x 32 x i1>,
+  i32);
+
+define <vscale x 32 x i16> @intrinsic_vsmul_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv32i16_nxv32i16_nxv32i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 32 x i16> @llvm.riscv.vsmul.mask.nxv32i16.nxv32i16(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16> %1,
+    <vscale x 32 x i16> %2,
+    <vscale x 32 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  i32);
+
+define <vscale x 1 x i32> @intrinsic_vsmul_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_vv_nxv1i32_nxv1i32_nxv1i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.nxv1i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    i32 %2)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vsmul.mask.nxv1i32.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i32> @intrinsic_vsmul_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i32_nxv1i32_nxv1i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i32> @llvm.riscv.vsmul.mask.nxv1i32.nxv1i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    <vscale x 1 x i32> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vsmul.nxv2i32.nxv2i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  i32);
+
+define <vscale x 2 x i32> @intrinsic_vsmul_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_vv_nxv2i32_nxv2i32_nxv2i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i32> @llvm.riscv.vsmul.nxv2i32.nxv2i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    i32 %2)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vsmul.mask.nxv2i32.nxv2i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i32> @intrinsic_vsmul_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv2i32_nxv2i32_nxv2i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i32> @llvm.riscv.vsmul.mask.nxv2i32.nxv2i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    <vscale x 2 x i32> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vsmul.nxv4i32.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  i32);
+
+define <vscale x 4 x i32> @intrinsic_vsmul_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_vv_nxv4i32_nxv4i32_nxv4i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i32> @llvm.riscv.vsmul.nxv4i32.nxv4i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    i32 %2)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vsmul.mask.nxv4i32.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i32> @intrinsic_vsmul_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv4i32_nxv4i32_nxv4i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i32> @llvm.riscv.vsmul.mask.nxv4i32.nxv4i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    <vscale x 4 x i32> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vsmul.nxv8i32.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  i32);
+
+define <vscale x 8 x i32> @intrinsic_vsmul_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_vv_nxv8i32_nxv8i32_nxv8i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i32> @llvm.riscv.vsmul.nxv8i32.nxv8i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    i32 %2)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vsmul.mask.nxv8i32.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i32> @intrinsic_vsmul_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv8i32_nxv8i32_nxv8i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i32> @llvm.riscv.vsmul.mask.nxv8i32.nxv8i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    <vscale x 8 x i32> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vsmul.nxv16i32.nxv16i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  i32);
+
+define <vscale x 16 x i32> @intrinsic_vsmul_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_vv_nxv16i32_nxv16i32_nxv16i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i32> @llvm.riscv.vsmul.nxv16i32.nxv16i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    i32 %2)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vsmul.mask.nxv16i32.nxv16i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i32> @intrinsic_vsmul_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv16i32_nxv16i32_nxv16i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i32> @llvm.riscv.vsmul.mask.nxv16i32.nxv16i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    <vscale x 16 x i32> %2,
+    <vscale x 16 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.i8(
+  <vscale x 1 x i8>,
+  i8,
+  i32);
+
+define <vscale x 1 x i8> @intrinsic_vsmul_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_vx_nxv1i8_nxv1i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.i8(
+    <vscale x 1 x i8> %0,
+    i8 %1,
+    i32 %2)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  i8,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i8> @intrinsic_vsmul_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv1i8_nxv1i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    i8 %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vsmul.nxv2i8.i8(
+  <vscale x 2 x i8>,
+  i8,
+  i32);
+
+define <vscale x 2 x i8> @intrinsic_vsmul_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_vx_nxv2i8_nxv2i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 2 x i8> @llvm.riscv.vsmul.nxv2i8.i8(
+    <vscale x 2 x i8> %0,
+    i8 %1,
+    i32 %2)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vsmul.mask.nxv2i8.i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  i8,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i8> @intrinsic_vsmul_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv2i8_nxv2i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i8> @llvm.riscv.vsmul.mask.nxv2i8.i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    i8 %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vsmul.nxv4i8.i8(
+  <vscale x 4 x i8>,
+  i8,
+  i32);
+
+define <vscale x 4 x i8> @intrinsic_vsmul_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_vx_nxv4i8_nxv4i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 4 x i8> @llvm.riscv.vsmul.nxv4i8.i8(
+    <vscale x 4 x i8> %0,
+    i8 %1,
+    i32 %2)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vsmul.mask.nxv4i8.i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  i8,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i8> @intrinsic_vsmul_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv4i8_nxv4i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i8> @llvm.riscv.vsmul.mask.nxv4i8.i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    i8 %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vsmul.nxv8i8.i8(
+  <vscale x 8 x i8>,
+  i8,
+  i32);
+
+define <vscale x 8 x i8> @intrinsic_vsmul_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_vx_nxv8i8_nxv8i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 8 x i8> @llvm.riscv.vsmul.nxv8i8.i8(
+    <vscale x 8 x i8> %0,
+    i8 %1,
+    i32 %2)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vsmul.mask.nxv8i8.i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  i8,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i8> @intrinsic_vsmul_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv8i8_nxv8i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i8> @llvm.riscv.vsmul.mask.nxv8i8.i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    i8 %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vsmul.nxv16i8.i8(
+  <vscale x 16 x i8>,
+  i8,
+  i32);
+
+define <vscale x 16 x i8> @intrinsic_vsmul_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_vx_nxv16i8_nxv16i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 16 x i8> @llvm.riscv.vsmul.nxv16i8.i8(
+    <vscale x 16 x i8> %0,
+    i8 %1,
+    i32 %2)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vsmul.mask.nxv16i8.i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  i8,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i8> @intrinsic_vsmul_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv16i8_nxv16i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i8> @llvm.riscv.vsmul.mask.nxv16i8.i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    i8 %2,
+    <vscale x 16 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vsmul.nxv32i8.i8(
+  <vscale x 32 x i8>,
+  i8,
+  i32);
+
+define <vscale x 32 x i8> @intrinsic_vsmul_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_vx_nxv32i8_nxv32i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 32 x i8> @llvm.riscv.vsmul.nxv32i8.i8(
+    <vscale x 32 x i8> %0,
+    i8 %1,
+    i32 %2)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vsmul.mask.nxv32i8.i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  i8,
+  <vscale x 32 x i1>,
+  i32);
+
+define <vscale x 32 x i8> @intrinsic_vsmul_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv32i8_nxv32i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 32 x i8> @llvm.riscv.vsmul.mask.nxv32i8.i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8> %1,
+    i8 %2,
+    <vscale x 32 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vsmul.nxv64i8.i8(
+  <vscale x 64 x i8>,
+  i8,
+  i32);
+
+define <vscale x 64 x i8> @intrinsic_vsmul_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_vx_nxv64i8_nxv64i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 64 x i8> @llvm.riscv.vsmul.nxv64i8.i8(
+    <vscale x 64 x i8> %0,
+    i8 %1,
+    i32 %2)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vsmul.mask.nxv64i8.i8(
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  i8,
+  <vscale x 64 x i1>,
+  i32);
+
+define <vscale x 64 x i8> @intrinsic_vsmul_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv64i8_nxv64i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 64 x i8> @llvm.riscv.vsmul.mask.nxv64i8.i8(
+    <vscale x 64 x i8> %0,
+    <vscale x 64 x i8> %1,
+    i8 %2,
+    <vscale x 64 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vsmul.nxv1i16.i16(
+  <vscale x 1 x i16>,
+  i16,
+  i32);
+
+define <vscale x 1 x i16> @intrinsic_vsmul_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_vx_nxv1i16_nxv1i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 1 x i16> @llvm.riscv.vsmul.nxv1i16.i16(
+    <vscale x 1 x i16> %0,
+    i16 %1,
+    i32 %2)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vsmul.mask.nxv1i16.i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  i16,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i16> @intrinsic_vsmul_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv1i16_nxv1i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i16> @llvm.riscv.vsmul.mask.nxv1i16.i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    i16 %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vsmul.nxv2i16.i16(
+  <vscale x 2 x i16>,
+  i16,
+  i32);
+
+define <vscale x 2 x i16> @intrinsic_vsmul_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_vx_nxv2i16_nxv2i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 2 x i16> @llvm.riscv.vsmul.nxv2i16.i16(
+    <vscale x 2 x i16> %0,
+    i16 %1,
+    i32 %2)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vsmul.mask.nxv2i16.i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  i16,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i16> @intrinsic_vsmul_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv2i16_nxv2i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i16> @llvm.riscv.vsmul.mask.nxv2i16.i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %1,
+    i16 %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vsmul.nxv4i16.i16(
+  <vscale x 4 x i16>,
+  i16,
+  i32);
+
+define <vscale x 4 x i16> @intrinsic_vsmul_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_vx_nxv4i16_nxv4i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 4 x i16> @llvm.riscv.vsmul.nxv4i16.i16(
+    <vscale x 4 x i16> %0,
+    i16 %1,
+    i32 %2)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vsmul.mask.nxv4i16.i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  i16,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i16> @intrinsic_vsmul_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv4i16_nxv4i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i16> @llvm.riscv.vsmul.mask.nxv4i16.i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %1,
+    i16 %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vsmul.nxv8i16.i16(
+  <vscale x 8 x i16>,
+  i16,
+  i32);
+
+define <vscale x 8 x i16> @intrinsic_vsmul_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_vx_nxv8i16_nxv8i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 8 x i16> @llvm.riscv.vsmul.nxv8i16.i16(
+    <vscale x 8 x i16> %0,
+    i16 %1,
+    i32 %2)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vsmul.mask.nxv8i16.i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  i16,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i16> @intrinsic_vsmul_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv8i16_nxv8i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i16> @llvm.riscv.vsmul.mask.nxv8i16.i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %1,
+    i16 %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vsmul.nxv16i16.i16(
+  <vscale x 16 x i16>,
+  i16,
+  i32);
+
+define <vscale x 16 x i16> @intrinsic_vsmul_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_vx_nxv16i16_nxv16i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 16 x i16> @llvm.riscv.vsmul.nxv16i16.i16(
+    <vscale x 16 x i16> %0,
+    i16 %1,
+    i32 %2)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vsmul.mask.nxv16i16.i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  i16,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i16> @intrinsic_vsmul_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv16i16_nxv16i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i16> @llvm.riscv.vsmul.mask.nxv16i16.i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %1,
+    i16 %2,
+    <vscale x 16 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vsmul.nxv32i16.i16(
+  <vscale x 32 x i16>,
+  i16,
+  i32);
+
+define <vscale x 32 x i16> @intrinsic_vsmul_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_vx_nxv32i16_nxv32i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 32 x i16> @llvm.riscv.vsmul.nxv32i16.i16(
+    <vscale x 32 x i16> %0,
+    i16 %1,
+    i32 %2)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vsmul.mask.nxv32i16.i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  i16,
+  <vscale x 32 x i1>,
+  i32);
+
+define <vscale x 32 x i16> @intrinsic_vsmul_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv32i16_nxv32i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 32 x i16> @llvm.riscv.vsmul.mask.nxv32i16.i16(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16> %1,
+    i16 %2,
+    <vscale x 32 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.i32(
+  <vscale x 1 x i32>,
+  i32,
+  i32);
+
+define <vscale x 1 x i32> @intrinsic_vsmul_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_vx_nxv1i32_nxv1i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.i32(
+    <vscale x 1 x i32> %0,
+    i32 %1,
+    i32 %2)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vsmul.mask.nxv1i32.i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  i32,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i32> @intrinsic_vsmul_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv1i32_nxv1i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i32> @llvm.riscv.vsmul.mask.nxv1i32.i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    i32 %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vsmul.nxv2i32.i32(
+  <vscale x 2 x i32>,
+  i32,
+  i32);
+
+define <vscale x 2 x i32> @intrinsic_vsmul_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_vx_nxv2i32_nxv2i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 2 x i32> @llvm.riscv.vsmul.nxv2i32.i32(
+    <vscale x 2 x i32> %0,
+    i32 %1,
+    i32 %2)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vsmul.mask.nxv2i32.i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  i32,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i32> @intrinsic_vsmul_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv2i32_nxv2i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i32> @llvm.riscv.vsmul.mask.nxv2i32.i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    i32 %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vsmul.nxv4i32.i32(
+  <vscale x 4 x i32>,
+  i32,
+  i32);
+
+define <vscale x 4 x i32> @intrinsic_vsmul_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_vx_nxv4i32_nxv4i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 4 x i32> @llvm.riscv.vsmul.nxv4i32.i32(
+    <vscale x 4 x i32> %0,
+    i32 %1,
+    i32 %2)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vsmul.mask.nxv4i32.i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  i32,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i32> @intrinsic_vsmul_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv4i32_nxv4i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i32> @llvm.riscv.vsmul.mask.nxv4i32.i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    i32 %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vsmul.nxv8i32.i32(
+  <vscale x 8 x i32>,
+  i32,
+  i32);
+
+define <vscale x 8 x i32> @intrinsic_vsmul_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_vx_nxv8i32_nxv8i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 8 x i32> @llvm.riscv.vsmul.nxv8i32.i32(
+    <vscale x 8 x i32> %0,
+    i32 %1,
+    i32 %2)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vsmul.mask.nxv8i32.i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  i32,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i32> @intrinsic_vsmul_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv8i32_nxv8i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i32> @llvm.riscv.vsmul.mask.nxv8i32.i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    i32 %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vsmul.nxv16i32.i32(
+  <vscale x 16 x i32>,
+  i32,
+  i32);
+
+define <vscale x 16 x i32> @intrinsic_vsmul_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_vx_nxv16i32_nxv16i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 16 x i32> @llvm.riscv.vsmul.nxv16i32.i32(
+    <vscale x 16 x i32> %0,
+    i32 %1,
+    i32 %2)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vsmul.mask.nxv16i32.i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  i32,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i32> @intrinsic_vsmul_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv16i32_nxv16i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i32> @llvm.riscv.vsmul.mask.nxv16i32.i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    i32 %2,
+    <vscale x 16 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 16 x i32> %a
+}

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vsmul-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsmul-rv64.ll
new file mode 100644
index 000000000000..c418e428ef0d
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vsmul-rv64.ll
@@ -0,0 +1,1761 @@
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  i64);
+
+define <vscale x 1 x i8> @intrinsic_vsmul_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_vv_nxv1i8_nxv1i8_nxv1i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i8> @intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    <vscale x 1 x i8> %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vsmul.nxv2i8.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  i64);
+
+define <vscale x 2 x i8> @intrinsic_vsmul_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_vv_nxv2i8_nxv2i8_nxv2i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i8> @llvm.riscv.vsmul.nxv2i8.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vsmul.mask.nxv2i8.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i8> @intrinsic_vsmul_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv2i8_nxv2i8_nxv2i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i8> @llvm.riscv.vsmul.mask.nxv2i8.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    <vscale x 2 x i8> %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vsmul.nxv4i8.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  i64);
+
+define <vscale x 4 x i8> @intrinsic_vsmul_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_vv_nxv4i8_nxv4i8_nxv4i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i8> @llvm.riscv.vsmul.nxv4i8.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vsmul.mask.nxv4i8.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i8> @intrinsic_vsmul_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv4i8_nxv4i8_nxv4i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i8> @llvm.riscv.vsmul.mask.nxv4i8.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    <vscale x 4 x i8> %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vsmul.nxv8i8.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  i64);
+
+define <vscale x 8 x i8> @intrinsic_vsmul_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_vv_nxv8i8_nxv8i8_nxv8i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i8> @llvm.riscv.vsmul.nxv8i8.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vsmul.mask.nxv8i8.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i8> @intrinsic_vsmul_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv8i8_nxv8i8_nxv8i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i8> @llvm.riscv.vsmul.mask.nxv8i8.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    <vscale x 8 x i8> %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vsmul.nxv16i8.nxv16i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  i64);
+
+define <vscale x 16 x i8> @intrinsic_vsmul_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_vv_nxv16i8_nxv16i8_nxv16i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i8> @llvm.riscv.vsmul.nxv16i8.nxv16i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vsmul.mask.nxv16i8.nxv16i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i8> @intrinsic_vsmul_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv16i8_nxv16i8_nxv16i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i8> @llvm.riscv.vsmul.mask.nxv16i8.nxv16i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    <vscale x 16 x i8> %2,
+    <vscale x 16 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vsmul.nxv32i8.nxv32i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  i64);
+
+define <vscale x 32 x i8> @intrinsic_vsmul_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_vv_nxv32i8_nxv32i8_nxv32i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 32 x i8> @llvm.riscv.vsmul.nxv32i8.nxv32i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vsmul.mask.nxv32i8.nxv32i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  <vscale x 32 x i1>,
+  i64);
+
+define <vscale x 32 x i8> @intrinsic_vsmul_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv32i8_nxv32i8_nxv32i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 32 x i8> @llvm.riscv.vsmul.mask.nxv32i8.nxv32i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8> %1,
+    <vscale x 32 x i8> %2,
+    <vscale x 32 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vsmul.nxv64i8.nxv64i8(
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  i64);
+
+define <vscale x 64 x i8> @intrinsic_vsmul_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_vv_nxv64i8_nxv64i8_nxv64i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 64 x i8> @llvm.riscv.vsmul.nxv64i8.nxv64i8(
+    <vscale x 64 x i8> %0,
+    <vscale x 64 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vsmul.mask.nxv64i8.nxv64i8(
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  <vscale x 64 x i1>,
+  i64);
+
+define <vscale x 64 x i8> @intrinsic_vsmul_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv64i8_nxv64i8_nxv64i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 64 x i8> @llvm.riscv.vsmul.mask.nxv64i8.nxv64i8(
+    <vscale x 64 x i8> %0,
+    <vscale x 64 x i8> %1,
+    <vscale x 64 x i8> %2,
+    <vscale x 64 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vsmul.nxv1i16.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  i64);
+
+define <vscale x 1 x i16> @intrinsic_vsmul_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_vv_nxv1i16_nxv1i16_nxv1i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i16> @llvm.riscv.vsmul.nxv1i16.nxv1i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    i64 %2)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vsmul.mask.nxv1i16.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i16> @intrinsic_vsmul_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i16_nxv1i16_nxv1i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i16> @llvm.riscv.vsmul.mask.nxv1i16.nxv1i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    <vscale x 1 x i16> %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vsmul.nxv2i16.nxv2i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  i64);
+
+define <vscale x 2 x i16> @intrinsic_vsmul_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_vv_nxv2i16_nxv2i16_nxv2i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i16> @llvm.riscv.vsmul.nxv2i16.nxv2i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %1,
+    i64 %2)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vsmul.mask.nxv2i16.nxv2i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i16> @intrinsic_vsmul_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv2i16_nxv2i16_nxv2i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i16> @llvm.riscv.vsmul.mask.nxv2i16.nxv2i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %1,
+    <vscale x 2 x i16> %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vsmul.nxv4i16.nxv4i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  i64);
+
+define <vscale x 4 x i16> @intrinsic_vsmul_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_vv_nxv4i16_nxv4i16_nxv4i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i16> @llvm.riscv.vsmul.nxv4i16.nxv4i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %1,
+    i64 %2)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vsmul.mask.nxv4i16.nxv4i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i16> @intrinsic_vsmul_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv4i16_nxv4i16_nxv4i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i16> @llvm.riscv.vsmul.mask.nxv4i16.nxv4i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %1,
+    <vscale x 4 x i16> %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vsmul.nxv8i16.nxv8i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  i64);
+
+define <vscale x 8 x i16> @intrinsic_vsmul_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_vv_nxv8i16_nxv8i16_nxv8i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i16> @llvm.riscv.vsmul.nxv8i16.nxv8i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %1,
+    i64 %2)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vsmul.mask.nxv8i16.nxv8i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i16> @intrinsic_vsmul_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv8i16_nxv8i16_nxv8i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i16> @llvm.riscv.vsmul.mask.nxv8i16.nxv8i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %1,
+    <vscale x 8 x i16> %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vsmul.nxv16i16.nxv16i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  i64);
+
+define <vscale x 16 x i16> @intrinsic_vsmul_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_vv_nxv16i16_nxv16i16_nxv16i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i16> @llvm.riscv.vsmul.nxv16i16.nxv16i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %1,
+    i64 %2)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vsmul.mask.nxv16i16.nxv16i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i16> @intrinsic_vsmul_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv16i16_nxv16i16_nxv16i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i16> @llvm.riscv.vsmul.mask.nxv16i16.nxv16i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %1,
+    <vscale x 16 x i16> %2,
+    <vscale x 16 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vsmul.nxv32i16.nxv32i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  i64);
+
+define <vscale x 32 x i16> @intrinsic_vsmul_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_vv_nxv32i16_nxv32i16_nxv32i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 32 x i16> @llvm.riscv.vsmul.nxv32i16.nxv32i16(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16> %1,
+    i64 %2)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vsmul.mask.nxv32i16.nxv32i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  <vscale x 32 x i1>,
+  i64);
+
+define <vscale x 32 x i16> @intrinsic_vsmul_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv32i16_nxv32i16_nxv32i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 32 x i16> @llvm.riscv.vsmul.mask.nxv32i16.nxv32i16(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16> %1,
+    <vscale x 32 x i16> %2,
+    <vscale x 32 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  i64);
+
+define <vscale x 1 x i32> @intrinsic_vsmul_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_vv_nxv1i32_nxv1i32_nxv1i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.nxv1i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    i64 %2)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vsmul.mask.nxv1i32.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i32> @intrinsic_vsmul_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i32_nxv1i32_nxv1i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i32> @llvm.riscv.vsmul.mask.nxv1i32.nxv1i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    <vscale x 1 x i32> %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vsmul.nxv2i32.nxv2i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  i64);
+
+define <vscale x 2 x i32> @intrinsic_vsmul_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_vv_nxv2i32_nxv2i32_nxv2i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i32> @llvm.riscv.vsmul.nxv2i32.nxv2i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    i64 %2)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vsmul.mask.nxv2i32.nxv2i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i32> @intrinsic_vsmul_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv2i32_nxv2i32_nxv2i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i32> @llvm.riscv.vsmul.mask.nxv2i32.nxv2i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    <vscale x 2 x i32> %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vsmul.nxv4i32.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  i64);
+
+define <vscale x 4 x i32> @intrinsic_vsmul_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_vv_nxv4i32_nxv4i32_nxv4i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i32> @llvm.riscv.vsmul.nxv4i32.nxv4i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    i64 %2)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vsmul.mask.nxv4i32.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i32> @intrinsic_vsmul_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv4i32_nxv4i32_nxv4i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i32> @llvm.riscv.vsmul.mask.nxv4i32.nxv4i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    <vscale x 4 x i32> %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vsmul.nxv8i32.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  i64);
+
+define <vscale x 8 x i32> @intrinsic_vsmul_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_vv_nxv8i32_nxv8i32_nxv8i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i32> @llvm.riscv.vsmul.nxv8i32.nxv8i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    i64 %2)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vsmul.mask.nxv8i32.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i32> @intrinsic_vsmul_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv8i32_nxv8i32_nxv8i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i32> @llvm.riscv.vsmul.mask.nxv8i32.nxv8i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    <vscale x 8 x i32> %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vsmul.nxv16i32.nxv16i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  i64);
+
+define <vscale x 16 x i32> @intrinsic_vsmul_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_vv_nxv16i32_nxv16i32_nxv16i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i32> @llvm.riscv.vsmul.nxv16i32.nxv16i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    i64 %2)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vsmul.mask.nxv16i32.nxv16i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i32> @intrinsic_vsmul_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv16i32_nxv16i32_nxv16i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i32> @llvm.riscv.vsmul.mask.nxv16i32.nxv16i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    <vscale x 16 x i32> %2,
+    <vscale x 16 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.nxv1i64(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>,
+  i64);
+
+define <vscale x 1 x i64> @intrinsic_vsmul_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_vv_nxv1i64_nxv1i64_nxv1i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.nxv1i64(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64> %1,
+    i64 %2)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i64> @intrinsic_vsmul_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i64_nxv1i64_nxv1i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64> %1,
+    <vscale x 1 x i64> %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.nxv2i64(
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>,
+  i64);
+
+define <vscale x 2 x i64> @intrinsic_vsmul_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_vv_nxv2i64_nxv2i64_nxv2i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.nxv2i64(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64> %1,
+    i64 %2)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.nxv2i64(
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i64> @intrinsic_vsmul_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv2i64_nxv2i64_nxv2i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.nxv2i64(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64> %1,
+    <vscale x 2 x i64> %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.nxv4i64(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  i64);
+
+define <vscale x 4 x i64> @intrinsic_vsmul_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_vv_nxv4i64_nxv4i64_nxv4i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.nxv4i64(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64> %1,
+    i64 %2)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.nxv4i64(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i64> @intrinsic_vsmul_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv4i64_nxv4i64_nxv4i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.nxv4i64(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64> %1,
+    <vscale x 4 x i64> %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.nxv8i64(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>,
+  i64);
+
+define <vscale x 8 x i64> @intrinsic_vsmul_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_vv_nxv8i64_nxv8i64_nxv8i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.nxv8i64(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i64> %1,
+    i64 %2)
+
+  ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.nxv8i64(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i64> @intrinsic_vsmul_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv8i64_nxv8i64_nxv8i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.nxv8i64(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i64> %1,
+    <vscale x 8 x i64> %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.i8(
+  <vscale x 1 x i8>,
+  i8,
+  i64);
+
+define <vscale x 1 x i8> @intrinsic_vsmul_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_vx_nxv1i8_nxv1i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.i8(
+    <vscale x 1 x i8> %0,
+    i8 %1,
+    i64 %2)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  i8,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i8> @intrinsic_vsmul_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv1i8_nxv1i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    i8 %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vsmul.nxv2i8.i8(
+  <vscale x 2 x i8>,
+  i8,
+  i64);
+
+define <vscale x 2 x i8> @intrinsic_vsmul_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_vx_nxv2i8_nxv2i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 2 x i8> @llvm.riscv.vsmul.nxv2i8.i8(
+    <vscale x 2 x i8> %0,
+    i8 %1,
+    i64 %2)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vsmul.mask.nxv2i8.i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  i8,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i8> @intrinsic_vsmul_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv2i8_nxv2i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i8> @llvm.riscv.vsmul.mask.nxv2i8.i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    i8 %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vsmul.nxv4i8.i8(
+  <vscale x 4 x i8>,
+  i8,
+  i64);
+
+define <vscale x 4 x i8> @intrinsic_vsmul_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_vx_nxv4i8_nxv4i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 4 x i8> @llvm.riscv.vsmul.nxv4i8.i8(
+    <vscale x 4 x i8> %0,
+    i8 %1,
+    i64 %2)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vsmul.mask.nxv4i8.i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  i8,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i8> @intrinsic_vsmul_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv4i8_nxv4i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i8> @llvm.riscv.vsmul.mask.nxv4i8.i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    i8 %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vsmul.nxv8i8.i8(
+  <vscale x 8 x i8>,
+  i8,
+  i64);
+
+define <vscale x 8 x i8> @intrinsic_vsmul_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_vx_nxv8i8_nxv8i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 8 x i8> @llvm.riscv.vsmul.nxv8i8.i8(
+    <vscale x 8 x i8> %0,
+    i8 %1,
+    i64 %2)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vsmul.mask.nxv8i8.i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  i8,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i8> @intrinsic_vsmul_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv8i8_nxv8i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i8> @llvm.riscv.vsmul.mask.nxv8i8.i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    i8 %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vsmul.nxv16i8.i8(
+  <vscale x 16 x i8>,
+  i8,
+  i64);
+
+define <vscale x 16 x i8> @intrinsic_vsmul_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_vx_nxv16i8_nxv16i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 16 x i8> @llvm.riscv.vsmul.nxv16i8.i8(
+    <vscale x 16 x i8> %0,
+    i8 %1,
+    i64 %2)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vsmul.mask.nxv16i8.i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  i8,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i8> @intrinsic_vsmul_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv16i8_nxv16i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i8> @llvm.riscv.vsmul.mask.nxv16i8.i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    i8 %2,
+    <vscale x 16 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vsmul.nxv32i8.i8(
+  <vscale x 32 x i8>,
+  i8,
+  i64);
+
+define <vscale x 32 x i8> @intrinsic_vsmul_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_vx_nxv32i8_nxv32i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 32 x i8> @llvm.riscv.vsmul.nxv32i8.i8(
+    <vscale x 32 x i8> %0,
+    i8 %1,
+    i64 %2)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vsmul.mask.nxv32i8.i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  i8,
+  <vscale x 32 x i1>,
+  i64);
+
+define <vscale x 32 x i8> @intrinsic_vsmul_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv32i8_nxv32i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 32 x i8> @llvm.riscv.vsmul.mask.nxv32i8.i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8> %1,
+    i8 %2,
+    <vscale x 32 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vsmul.nxv64i8.i8(
+  <vscale x 64 x i8>,
+  i8,
+  i64);
+
+define <vscale x 64 x i8> @intrinsic_vsmul_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_vx_nxv64i8_nxv64i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 64 x i8> @llvm.riscv.vsmul.nxv64i8.i8(
+    <vscale x 64 x i8> %0,
+    i8 %1,
+    i64 %2)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vsmul.mask.nxv64i8.i8(
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  i8,
+  <vscale x 64 x i1>,
+  i64);
+
+define <vscale x 64 x i8> @intrinsic_vsmul_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv64i8_nxv64i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 64 x i8> @llvm.riscv.vsmul.mask.nxv64i8.i8(
+    <vscale x 64 x i8> %0,
+    <vscale x 64 x i8> %1,
+    i8 %2,
+    <vscale x 64 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vsmul.nxv1i16.i16(
+  <vscale x 1 x i16>,
+  i16,
+  i64);
+
+define <vscale x 1 x i16> @intrinsic_vsmul_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_vx_nxv1i16_nxv1i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 1 x i16> @llvm.riscv.vsmul.nxv1i16.i16(
+    <vscale x 1 x i16> %0,
+    i16 %1,
+    i64 %2)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vsmul.mask.nxv1i16.i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  i16,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i16> @intrinsic_vsmul_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv1i16_nxv1i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i16> @llvm.riscv.vsmul.mask.nxv1i16.i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    i16 %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vsmul.nxv2i16.i16(
+  <vscale x 2 x i16>,
+  i16,
+  i64);
+
+define <vscale x 2 x i16> @intrinsic_vsmul_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_vx_nxv2i16_nxv2i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 2 x i16> @llvm.riscv.vsmul.nxv2i16.i16(
+    <vscale x 2 x i16> %0,
+    i16 %1,
+    i64 %2)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vsmul.mask.nxv2i16.i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  i16,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i16> @intrinsic_vsmul_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv2i16_nxv2i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i16> @llvm.riscv.vsmul.mask.nxv2i16.i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %1,
+    i16 %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vsmul.nxv4i16.i16(
+  <vscale x 4 x i16>,
+  i16,
+  i64);
+
+define <vscale x 4 x i16> @intrinsic_vsmul_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_vx_nxv4i16_nxv4i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 4 x i16> @llvm.riscv.vsmul.nxv4i16.i16(
+    <vscale x 4 x i16> %0,
+    i16 %1,
+    i64 %2)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vsmul.mask.nxv4i16.i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  i16,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i16> @intrinsic_vsmul_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv4i16_nxv4i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i16> @llvm.riscv.vsmul.mask.nxv4i16.i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %1,
+    i16 %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vsmul.nxv8i16.i16(
+  <vscale x 8 x i16>,
+  i16,
+  i64);
+
+define <vscale x 8 x i16> @intrinsic_vsmul_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_vx_nxv8i16_nxv8i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 8 x i16> @llvm.riscv.vsmul.nxv8i16.i16(
+    <vscale x 8 x i16> %0,
+    i16 %1,
+    i64 %2)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vsmul.mask.nxv8i16.i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  i16,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i16> @intrinsic_vsmul_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv8i16_nxv8i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i16> @llvm.riscv.vsmul.mask.nxv8i16.i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %1,
+    i16 %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vsmul.nxv16i16.i16(
+  <vscale x 16 x i16>,
+  i16,
+  i64);
+
+define <vscale x 16 x i16> @intrinsic_vsmul_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_vx_nxv16i16_nxv16i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 16 x i16> @llvm.riscv.vsmul.nxv16i16.i16(
+    <vscale x 16 x i16> %0,
+    i16 %1,
+    i64 %2)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vsmul.mask.nxv16i16.i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  i16,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i16> @intrinsic_vsmul_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv16i16_nxv16i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i16> @llvm.riscv.vsmul.mask.nxv16i16.i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %1,
+    i16 %2,
+    <vscale x 16 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vsmul.nxv32i16.i16(
+  <vscale x 32 x i16>,
+  i16,
+  i64);
+
+define <vscale x 32 x i16> @intrinsic_vsmul_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_vx_nxv32i16_nxv32i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 32 x i16> @llvm.riscv.vsmul.nxv32i16.i16(
+    <vscale x 32 x i16> %0,
+    i16 %1,
+    i64 %2)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vsmul.mask.nxv32i16.i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  i16,
+  <vscale x 32 x i1>,
+  i64);
+
+define <vscale x 32 x i16> @intrinsic_vsmul_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv32i16_nxv32i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 32 x i16> @llvm.riscv.vsmul.mask.nxv32i16.i16(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16> %1,
+    i16 %2,
+    <vscale x 32 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.i32(
+  <vscale x 1 x i32>,
+  i32,
+  i64);
+
+define <vscale x 1 x i32> @intrinsic_vsmul_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_vx_nxv1i32_nxv1i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.i32(
+    <vscale x 1 x i32> %0,
+    i32 %1,
+    i64 %2)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vsmul.mask.nxv1i32.i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  i32,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i32> @intrinsic_vsmul_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv1i32_nxv1i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i32> @llvm.riscv.vsmul.mask.nxv1i32.i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    i32 %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vsmul.nxv2i32.i32(
+  <vscale x 2 x i32>,
+  i32,
+  i64);
+
+define <vscale x 2 x i32> @intrinsic_vsmul_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_vx_nxv2i32_nxv2i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 2 x i32> @llvm.riscv.vsmul.nxv2i32.i32(
+    <vscale x 2 x i32> %0,
+    i32 %1,
+    i64 %2)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vsmul.mask.nxv2i32.i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  i32,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i32> @intrinsic_vsmul_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv2i32_nxv2i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i32> @llvm.riscv.vsmul.mask.nxv2i32.i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    i32 %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vsmul.nxv4i32.i32(
+  <vscale x 4 x i32>,
+  i32,
+  i64);
+
+define <vscale x 4 x i32> @intrinsic_vsmul_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_vx_nxv4i32_nxv4i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 4 x i32> @llvm.riscv.vsmul.nxv4i32.i32(
+    <vscale x 4 x i32> %0,
+    i32 %1,
+    i64 %2)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vsmul.mask.nxv4i32.i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  i32,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i32> @intrinsic_vsmul_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv4i32_nxv4i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i32> @llvm.riscv.vsmul.mask.nxv4i32.i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    i32 %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vsmul.nxv8i32.i32(
+  <vscale x 8 x i32>,
+  i32,
+  i64);
+
+define <vscale x 8 x i32> @intrinsic_vsmul_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_vx_nxv8i32_nxv8i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 8 x i32> @llvm.riscv.vsmul.nxv8i32.i32(
+    <vscale x 8 x i32> %0,
+    i32 %1,
+    i64 %2)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vsmul.mask.nxv8i32.i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  i32,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i32> @intrinsic_vsmul_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv8i32_nxv8i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i32> @llvm.riscv.vsmul.mask.nxv8i32.i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    i32 %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vsmul.nxv16i32.i32(
+  <vscale x 16 x i32>,
+  i32,
+  i64);
+
+define <vscale x 16 x i32> @intrinsic_vsmul_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_vx_nxv16i32_nxv16i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 16 x i32> @llvm.riscv.vsmul.nxv16i32.i32(
+    <vscale x 16 x i32> %0,
+    i32 %1,
+    i64 %2)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vsmul.mask.nxv16i32.i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  i32,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i32> @intrinsic_vsmul_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv16i32_nxv16i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i32> @llvm.riscv.vsmul.mask.nxv16i32.i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    i32 %2,
+    <vscale x 16 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.i64(
+  <vscale x 1 x i64>,
+  i64,
+  i64);
+
+define <vscale x 1 x i64> @intrinsic_vsmul_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_vx_nxv1i64_nxv1i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.i64(
+    <vscale x 1 x i64> %0,
+    i64 %1,
+    i64 %2)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.i64(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>,
+  i64,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i64> @intrinsic_vsmul_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv1i64_nxv1i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.i64(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64> %1,
+    i64 %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.i64(
+  <vscale x 2 x i64>,
+  i64,
+  i64);
+
+define <vscale x 2 x i64> @intrinsic_vsmul_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_vx_nxv2i64_nxv2i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.i64(
+    <vscale x 2 x i64> %0,
+    i64 %1,
+    i64 %2)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.i64(
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>,
+  i64,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i64> @intrinsic_vsmul_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv2i64_nxv2i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.i64(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64> %1,
+    i64 %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.i64(
+  <vscale x 4 x i64>,
+  i64,
+  i64);
+
+define <vscale x 4 x i64> @intrinsic_vsmul_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_vx_nxv4i64_nxv4i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.i64(
+    <vscale x 4 x i64> %0,
+    i64 %1,
+    i64 %2)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.i64(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  i64,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i64> @intrinsic_vsmul_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv4i64_nxv4i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.i64(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64> %1,
+    i64 %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.i64(
+  <vscale x 8 x i64>,
+  i64,
+  i64);
+
+define <vscale x 8 x i64> @intrinsic_vsmul_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_vx_nxv8i64_nxv8i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.i64(
+    <vscale x 8 x i64> %0,
+    i64 %1,
+    i64 %2)
+
+  ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.i64(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>,
+  i64,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i64> @intrinsic_vsmul_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv8i64_nxv8i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.i64(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i64> %1,
+    i64 %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i64> %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vssra-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vssra-rv32.ll
new file mode 100644
index 000000000000..7cb0e4e5cbd5
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vssra-rv32.ll
@@ -0,0 +1,1945 @@
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  i32);
+
+define <vscale x 1 x i8> @intrinsic_vssra_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vv_nxv1i8_nxv1i8_nxv1i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i8> @intrinsic_vssra_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv1i8_nxv1i8_nxv1i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    <vscale x 1 x i8> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  i32);
+
+define <vscale x 2 x i8> @intrinsic_vssra_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vv_nxv2i8_nxv2i8_nxv2i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i8> @intrinsic_vssra_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv2i8_nxv2i8_nxv2i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    <vscale x 2 x i8> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  i32);
+
+define <vscale x 4 x i8> @intrinsic_vssra_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vv_nxv4i8_nxv4i8_nxv4i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i8> @intrinsic_vssra_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv4i8_nxv4i8_nxv4i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    <vscale x 4 x i8> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  i32);
+
+define <vscale x 8 x i8> @intrinsic_vssra_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vv_nxv8i8_nxv8i8_nxv8i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i8> @intrinsic_vssra_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv8i8_nxv8i8_nxv8i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    <vscale x 8 x i8> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8.nxv16i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  i32);
+
+define <vscale x 16 x i8> @intrinsic_vssra_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vv_nxv16i8_nxv16i8_nxv16i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8.nxv16i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8.nxv16i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i8> @intrinsic_vssra_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv16i8_nxv16i8_nxv16i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8.nxv16i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    <vscale x 16 x i8> %2,
+    <vscale x 16 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.nxv32i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  i32);
+
+define <vscale x 32 x i8> @intrinsic_vssra_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vv_nxv32i8_nxv32i8_nxv32i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.nxv32i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8.nxv32i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  <vscale x 32 x i1>,
+  i32);
+
+define <vscale x 32 x i8> @intrinsic_vssra_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv32i8_nxv32i8_nxv32i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8.nxv32i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8> %1,
+    <vscale x 32 x i8> %2,
+    <vscale x 32 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8.nxv64i8(
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  i32);
+
+define <vscale x 64 x i8> @intrinsic_vssra_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vv_nxv64i8_nxv64i8_nxv64i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8.nxv64i8(
+    <vscale x 64 x i8> %0,
+    <vscale x 64 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.nxv64i8(
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  <vscale x 64 x i1>,
+  i32);
+
+define <vscale x 64 x i8> @intrinsic_vssra_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv64i8_nxv64i8_nxv64i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.nxv64i8(
+    <vscale x 64 x i8> %0,
+    <vscale x 64 x i8> %1,
+    <vscale x 64 x i8> %2,
+    <vscale x 64 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  i32);
+
+define <vscale x 1 x i16> @intrinsic_vssra_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vv_nxv1i16_nxv1i16_nxv1i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16.nxv1i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    i32 %2)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i16> @intrinsic_vssra_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv1i16_nxv1i16_nxv1i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16.nxv1i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    <vscale x 1 x i16> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16.nxv2i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  i32);
+
+define <vscale x 2 x i16> @intrinsic_vssra_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vv_nxv2i16_nxv2i16_nxv2i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16.nxv2i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %1,
+    i32 %2)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16.nxv2i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i16> @intrinsic_vssra_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv2i16_nxv2i16_nxv2i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16.nxv2i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %1,
+    <vscale x 2 x i16> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16.nxv4i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  i32);
+
+define <vscale x 4 x i16> @intrinsic_vssra_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vv_nxv4i16_nxv4i16_nxv4i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16.nxv4i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %1,
+    i32 %2)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16.nxv4i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i16> @intrinsic_vssra_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv4i16_nxv4i16_nxv4i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16.nxv4i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %1,
+    <vscale x 4 x i16> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16.nxv8i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  i32);
+
+define <vscale x 8 x i16> @intrinsic_vssra_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vv_nxv8i16_nxv8i16_nxv8i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16.nxv8i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %1,
+    i32 %2)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16.nxv8i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i16> @intrinsic_vssra_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv8i16_nxv8i16_nxv8i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16.nxv8i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %1,
+    <vscale x 8 x i16> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16.nxv16i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  i32);
+
+define <vscale x 16 x i16> @intrinsic_vssra_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vv_nxv16i16_nxv16i16_nxv16i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16.nxv16i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %1,
+    i32 %2)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16.nxv16i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i16> @intrinsic_vssra_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv16i16_nxv16i16_nxv16i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16.nxv16i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %1,
+    <vscale x 16 x i16> %2,
+    <vscale x 16 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16.nxv32i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  i32);
+
+define <vscale x 32 x i16> @intrinsic_vssra_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vv_nxv32i16_nxv32i16_nxv32i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16.nxv32i16(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16> %1,
+    i32 %2)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.nxv32i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  <vscale x 32 x i1>,
+  i32);
+
+define <vscale x 32 x i16> @intrinsic_vssra_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv32i16_nxv32i16_nxv32i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.nxv32i16(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16> %1,
+    <vscale x 32 x i16> %2,
+    <vscale x 32 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  i32);
+
+define <vscale x 1 x i32> @intrinsic_vssra_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vv_nxv1i32_nxv1i32_nxv1i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.nxv1i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    i32 %2)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i32> @intrinsic_vssra_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv1i32_nxv1i32_nxv1i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.nxv1i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    <vscale x 1 x i32> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32.nxv2i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  i32);
+
+define <vscale x 2 x i32> @intrinsic_vssra_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vv_nxv2i32_nxv2i32_nxv2i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32.nxv2i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    i32 %2)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32.nxv2i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i32> @intrinsic_vssra_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv2i32_nxv2i32_nxv2i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32.nxv2i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    <vscale x 2 x i32> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  i32);
+
+define <vscale x 4 x i32> @intrinsic_vssra_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vv_nxv4i32_nxv4i32_nxv4i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.nxv4i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    i32 %2)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i32> @intrinsic_vssra_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv4i32_nxv4i32_nxv4i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32.nxv4i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    <vscale x 4 x i32> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  i32);
+
+define <vscale x 8 x i32> @intrinsic_vssra_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vv_nxv8i32_nxv8i32_nxv8i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32.nxv8i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    i32 %2)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i32> @intrinsic_vssra_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv8i32_nxv8i32_nxv8i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32.nxv8i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    <vscale x 8 x i32> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32.nxv16i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  i32);
+
+define <vscale x 16 x i32> @intrinsic_vssra_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vv_nxv16i32_nxv16i32_nxv16i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32.nxv16i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    i32 %2)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.nxv16i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i32> @intrinsic_vssra_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv16i32_nxv16i32_nxv16i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.nxv16i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    <vscale x 16 x i32> %2,
+    <vscale x 16 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.i8(
+  <vscale x 1 x i8>,
+  i8,
+  i32);
+
+define <vscale x 1 x i8> @intrinsic_vssra_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vx_nxv1i8_nxv1i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.i8(
+    <vscale x 1 x i8> %0,
+    i8 %1,
+    i32 %2)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  i8,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i8> @intrinsic_vssra_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv1i8_nxv1i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    i8 %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.i8(
+  <vscale x 2 x i8>,
+  i8,
+  i32);
+
+define <vscale x 2 x i8> @intrinsic_vssra_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vx_nxv2i8_nxv2i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.i8(
+    <vscale x 2 x i8> %0,
+    i8 %1,
+    i32 %2)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8.i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  i8,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i8> @intrinsic_vssra_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv2i8_nxv2i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8.i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    i8 %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8.i8(
+  <vscale x 4 x i8>,
+  i8,
+  i32);
+
+define <vscale x 4 x i8> @intrinsic_vssra_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vx_nxv4i8_nxv4i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8.i8(
+    <vscale x 4 x i8> %0,
+    i8 %1,
+    i32 %2)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8.i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  i8,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i8> @intrinsic_vssra_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv4i8_nxv4i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8.i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    i8 %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8.i8(
+  <vscale x 8 x i8>,
+  i8,
+  i32);
+
+define <vscale x 8 x i8> @intrinsic_vssra_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vx_nxv8i8_nxv8i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8.i8(
+    <vscale x 8 x i8> %0,
+    i8 %1,
+    i32 %2)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8.i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  i8,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i8> @intrinsic_vssra_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv8i8_nxv8i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8.i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    i8 %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8.i8(
+  <vscale x 16 x i8>,
+  i8,
+  i32);
+
+define <vscale x 16 x i8> @intrinsic_vssra_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vx_nxv16i8_nxv16i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8.i8(
+    <vscale x 16 x i8> %0,
+    i8 %1,
+    i32 %2)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8.i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  i8,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i8> @intrinsic_vssra_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv16i8_nxv16i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8.i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    i8 %2,
+    <vscale x 16 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.i8(
+  <vscale x 32 x i8>,
+  i8,
+  i32);
+
+define <vscale x 32 x i8> @intrinsic_vssra_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vx_nxv32i8_nxv32i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.i8(
+    <vscale x 32 x i8> %0,
+    i8 %1,
+    i32 %2)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8.i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  i8,
+  <vscale x 32 x i1>,
+  i32);
+
+define <vscale x 32 x i8> @intrinsic_vssra_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv32i8_nxv32i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8.i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8> %1,
+    i8 %2,
+    <vscale x 32 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8.i8(
+  <vscale x 64 x i8>,
+  i8,
+  i32);
+
+define <vscale x 64 x i8> @intrinsic_vssra_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vx_nxv64i8_nxv64i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8.i8(
+    <vscale x 64 x i8> %0,
+    i8 %1,
+    i32 %2)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.i8(
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  i8,
+  <vscale x 64 x i1>,
+  i32);
+
+define <vscale x 64 x i8> @intrinsic_vssra_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv64i8_nxv64i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.i8(
+    <vscale x 64 x i8> %0,
+    <vscale x 64 x i8> %1,
+    i8 %2,
+    <vscale x 64 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16.i16(
+  <vscale x 1 x i16>,
+  i16,
+  i32);
+
+define <vscale x 1 x i16> @intrinsic_vssra_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vx_nxv1i16_nxv1i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16.i16(
+    <vscale x 1 x i16> %0,
+    i16 %1,
+    i32 %2)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16.i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  i16,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i16> @intrinsic_vssra_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv1i16_nxv1i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16.i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    i16 %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16.i16(
+  <vscale x 2 x i16>,
+  i16,
+  i32);
+
+define <vscale x 2 x i16> @intrinsic_vssra_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vx_nxv2i16_nxv2i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16.i16(
+    <vscale x 2 x i16> %0,
+    i16 %1,
+    i32 %2)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16.i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  i16,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i16> @intrinsic_vssra_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv2i16_nxv2i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16.i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %1,
+    i16 %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16.i16(
+  <vscale x 4 x i16>,
+  i16,
+  i32);
+
+define <vscale x 4 x i16> @intrinsic_vssra_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vx_nxv4i16_nxv4i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16.i16(
+    <vscale x 4 x i16> %0,
+    i16 %1,
+    i32 %2)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16.i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  i16,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i16> @intrinsic_vssra_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv4i16_nxv4i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16.i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %1,
+    i16 %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16.i16(
+  <vscale x 8 x i16>,
+  i16,
+  i32);
+
+define <vscale x 8 x i16> @intrinsic_vssra_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vx_nxv8i16_nxv8i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16.i16(
+    <vscale x 8 x i16> %0,
+    i16 %1,
+    i32 %2)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16.i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  i16,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i16> @intrinsic_vssra_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv8i16_nxv8i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16.i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %1,
+    i16 %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16.i16(
+  <vscale x 16 x i16>,
+  i16,
+  i32);
+
+define <vscale x 16 x i16> @intrinsic_vssra_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vx_nxv16i16_nxv16i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16.i16(
+    <vscale x 16 x i16> %0,
+    i16 %1,
+    i32 %2)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16.i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  i16,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i16> @intrinsic_vssra_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv16i16_nxv16i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16.i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %1,
+    i16 %2,
+    <vscale x 16 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16.i16(
+  <vscale x 32 x i16>,
+  i16,
+  i32);
+
+define <vscale x 32 x i16> @intrinsic_vssra_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vx_nxv32i16_nxv32i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16.i16(
+    <vscale x 32 x i16> %0,
+    i16 %1,
+    i32 %2)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  i16,
+  <vscale x 32 x i1>,
+  i32);
+
+define <vscale x 32 x i16> @intrinsic_vssra_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv32i16_nxv32i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.i16(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16> %1,
+    i16 %2,
+    <vscale x 32 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.i32(
+  <vscale x 1 x i32>,
+  i32,
+  i32);
+
+define <vscale x 1 x i32> @intrinsic_vssra_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vx_nxv1i32_nxv1i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.i32(
+    <vscale x 1 x i32> %0,
+    i32 %1,
+    i32 %2)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  i32,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i32> @intrinsic_vssra_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv1i32_nxv1i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    i32 %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32.i32(
+  <vscale x 2 x i32>,
+  i32,
+  i32);
+
+define <vscale x 2 x i32> @intrinsic_vssra_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vx_nxv2i32_nxv2i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32.i32(
+    <vscale x 2 x i32> %0,
+    i32 %1,
+    i32 %2)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32.i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  i32,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i32> @intrinsic_vssra_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv2i32_nxv2i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32.i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    i32 %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.i32(
+  <vscale x 4 x i32>,
+  i32,
+  i32);
+
+define <vscale x 4 x i32> @intrinsic_vssra_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vx_nxv4i32_nxv4i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.i32(
+    <vscale x 4 x i32> %0,
+    i32 %1,
+    i32 %2)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32.i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  i32,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i32> @intrinsic_vssra_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv4i32_nxv4i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32.i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    i32 %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32.i32(
+  <vscale x 8 x i32>,
+  i32,
+  i32);
+
+define <vscale x 8 x i32> @intrinsic_vssra_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vx_nxv8i32_nxv8i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32.i32(
+    <vscale x 8 x i32> %0,
+    i32 %1,
+    i32 %2)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32.i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  i32,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i32> @intrinsic_vssra_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv8i32_nxv8i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32.i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    i32 %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32.i32(
+  <vscale x 16 x i32>,
+  i32,
+  i32);
+
+define <vscale x 16 x i32> @intrinsic_vssra_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vx_nxv16i32_nxv16i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32.i32(
+    <vscale x 16 x i32> %0,
+    i32 %1,
+    i32 %2)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  i32,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i32> @intrinsic_vssra_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv16i32_nxv16i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    i32 %2,
+    <vscale x 16 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 16 x i32> %a
+}
+
+define <vscale x 1 x i8> @intrinsic_vssra_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vi_nxv1i8_nxv1i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.i8(
+    <vscale x 1 x i8> %0,
+    i8 9,
+    i32 %1)
+
+  ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 1 x i8> @intrinsic_vssra_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv1i8_nxv1i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    i8 9,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_vssra_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vi_nxv2i8_nxv2i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.i8(
+    <vscale x 2 x i8> %0,
+    i8 9,
+    i32 %1)
+
+  ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_vssra_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv2i8_nxv2i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8.i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    i8 9,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_vssra_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vi_nxv4i8_nxv4i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8.i8(
+    <vscale x 4 x i8> %0,
+    i8 9,
+    i32 %1)
+
+  ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_vssra_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv4i8_nxv4i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8.i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    i8 9,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vssra_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vi_nxv8i8_nxv8i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8.i8(
+    <vscale x 8 x i8> %0,
+    i8 9,
+    i32 %1)
+
+  ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vssra_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv8i8_nxv8i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8.i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    i8 9,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_vssra_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vi_nxv16i8_nxv16i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8.i8(
+    <vscale x 16 x i8> %0,
+    i8 9,
+    i32 %1)
+
+  ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_vssra_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv16i8_nxv16i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8.i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    i8 9,
+    <vscale x 16 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 32 x i8> @intrinsic_vssra_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vi_nxv32i8_nxv32i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.i8(
+    <vscale x 32 x i8> %0,
+    i8 9,
+    i32 %1)
+
+  ret <vscale x 32 x i8> %a
+}
+
+define <vscale x 32 x i8> @intrinsic_vssra_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv32i8_nxv32i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8.i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8> %1,
+    i8 9,
+    <vscale x 32 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 32 x i8> %a
+}
+
+define <vscale x 64 x i8> @intrinsic_vssra_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vi_nxv64i8_nxv64i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8.i8(
+    <vscale x 64 x i8> %0,
+    i8 9,
+    i32 %1)
+
+  ret <vscale x 64 x i8> %a
+}
+
+define <vscale x 64 x i8> @intrinsic_vssra_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv64i8_nxv64i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.i8(
+    <vscale x 64 x i8> %0,
+    <vscale x 64 x i8> %1,
+    i8 9,
+    <vscale x 64 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 64 x i8> %a
+}
+
+define <vscale x 1 x i16> @intrinsic_vssra_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vi_nxv1i16_nxv1i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16.i16(
+    <vscale x 1 x i16> %0,
+    i16 9,
+    i32 %1)
+
+  ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 1 x i16> @intrinsic_vssra_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv1i16_nxv1i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16.i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    i16 9,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 2 x i16> @intrinsic_vssra_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vi_nxv2i16_nxv2i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16.i16(
+    <vscale x 2 x i16> %0,
+    i16 9,
+    i32 %1)
+
+  ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 2 x i16> @intrinsic_vssra_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv2i16_nxv2i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16.i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %1,
+    i16 9,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 4 x i16> @intrinsic_vssra_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vi_nxv4i16_nxv4i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16.i16(
+    <vscale x 4 x i16> %0,
+    i16 9,
+    i32 %1)
+
+  ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 4 x i16> @intrinsic_vssra_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv4i16_nxv4i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16.i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %1,
+    i16 9,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 8 x i16> @intrinsic_vssra_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vi_nxv8i16_nxv8i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16.i16(
+    <vscale x 8 x i16> %0,
+    i16 9,
+    i32 %1)
+
+  ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 8 x i16> @intrinsic_vssra_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv8i16_nxv8i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16.i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %1,
+    i16 9,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 16 x i16> @intrinsic_vssra_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vi_nxv16i16_nxv16i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16.i16(
+    <vscale x 16 x i16> %0,
+    i16 9,
+    i32 %1)
+
+  ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 16 x i16> @intrinsic_vssra_mask_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv16i16_nxv16i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16.i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %1,
+    i16 9,
+    <vscale x 16 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 32 x i16> @intrinsic_vssra_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vi_nxv32i16_nxv32i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16.i16(
+    <vscale x 32 x i16> %0,
+    i16 9,
+    i32 %1)
+
+  ret <vscale x 32 x i16> %a
+}
+
+define <vscale x 32 x i16> @intrinsic_vssra_mask_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv32i16_nxv32i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.i16(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16> %1,
+    i16 9,
+    <vscale x 32 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 32 x i16> %a
+}
+
+define <vscale x 1 x i32> @intrinsic_vssra_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vi_nxv1i32_nxv1i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.i32(
+    <vscale x 1 x i32> %0,
+    i32 9,
+    i32 %1)
+
+  ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 1 x i32> @intrinsic_vssra_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv1i32_nxv1i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    i32 9,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 2 x i32> @intrinsic_vssra_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vi_nxv2i32_nxv2i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32.i32(
+    <vscale x 2 x i32> %0,
+    i32 9,
+    i32 %1)
+
+  ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 2 x i32> @intrinsic_vssra_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv2i32_nxv2i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32.i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    i32 9,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 4 x i32> @intrinsic_vssra_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vi_nxv4i32_nxv4i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.i32(
+    <vscale x 4 x i32> %0,
+    i32 9,
+    i32 %1)
+
+  ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 4 x i32> @intrinsic_vssra_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv4i32_nxv4i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32.i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    i32 9,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 8 x i32> @intrinsic_vssra_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vi_nxv8i32_nxv8i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32.i32(
+    <vscale x 8 x i32> %0,
+    i32 9,
+    i32 %1)
+
+  ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 8 x i32> @intrinsic_vssra_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv8i32_nxv8i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32.i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    i32 9,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 16 x i32> @intrinsic_vssra_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vi_nxv16i32_nxv16i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32.i32(
+    <vscale x 16 x i32> %0,
+    i32 9,
+    i32 %1)
+
+  ret <vscale x 16 x i32> %a
+}
+
+define <vscale x 16 x i32> @intrinsic_vssra_mask_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv16i32_nxv16i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    i32 9,
+    <vscale x 16 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 16 x i32> %a
+}

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vssra-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vssra-rv64.ll
new file mode 100644
index 000000000000..044dc7f7df0f
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vssra-rv64.ll
@@ -0,0 +1,2377 @@
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  i64);
+
+define <vscale x 1 x i8> @intrinsic_vssra_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vv_nxv1i8_nxv1i8_nxv1i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i8> @intrinsic_vssra_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv1i8_nxv1i8_nxv1i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    <vscale x 1 x i8> %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  i64);
+
+define <vscale x 2 x i8> @intrinsic_vssra_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vv_nxv2i8_nxv2i8_nxv2i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i8> @intrinsic_vssra_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv2i8_nxv2i8_nxv2i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    <vscale x 2 x i8> %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  i64);
+
+define <vscale x 4 x i8> @intrinsic_vssra_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vv_nxv4i8_nxv4i8_nxv4i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i8> @intrinsic_vssra_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv4i8_nxv4i8_nxv4i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    <vscale x 4 x i8> %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  i64);
+
+define <vscale x 8 x i8> @intrinsic_vssra_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vv_nxv8i8_nxv8i8_nxv8i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i8> @intrinsic_vssra_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv8i8_nxv8i8_nxv8i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    <vscale x 8 x i8> %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8.nxv16i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  i64);
+
+define <vscale x 16 x i8> @intrinsic_vssra_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vv_nxv16i8_nxv16i8_nxv16i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8.nxv16i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8.nxv16i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i8> @intrinsic_vssra_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv16i8_nxv16i8_nxv16i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8.nxv16i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    <vscale x 16 x i8> %2,
+    <vscale x 16 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.nxv32i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  i64);
+
+define <vscale x 32 x i8> @intrinsic_vssra_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vv_nxv32i8_nxv32i8_nxv32i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.nxv32i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8.nxv32i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  <vscale x 32 x i1>,
+  i64);
+
+define <vscale x 32 x i8> @intrinsic_vssra_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv32i8_nxv32i8_nxv32i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8.nxv32i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8> %1,
+    <vscale x 32 x i8> %2,
+    <vscale x 32 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8.nxv64i8(
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  i64);
+
+define <vscale x 64 x i8> @intrinsic_vssra_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vv_nxv64i8_nxv64i8_nxv64i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8.nxv64i8(
+    <vscale x 64 x i8> %0,
+    <vscale x 64 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.nxv64i8(
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  <vscale x 64 x i1>,
+  i64);
+
+define <vscale x 64 x i8> @intrinsic_vssra_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv64i8_nxv64i8_nxv64i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.nxv64i8(
+    <vscale x 64 x i8> %0,
+    <vscale x 64 x i8> %1,
+    <vscale x 64 x i8> %2,
+    <vscale x 64 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  i64);
+
+define <vscale x 1 x i16> @intrinsic_vssra_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vv_nxv1i16_nxv1i16_nxv1i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16.nxv1i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    i64 %2)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i16> @intrinsic_vssra_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv1i16_nxv1i16_nxv1i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16.nxv1i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    <vscale x 1 x i16> %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16.nxv2i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  i64);
+
+define <vscale x 2 x i16> @intrinsic_vssra_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vv_nxv2i16_nxv2i16_nxv2i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16.nxv2i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %1,
+    i64 %2)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16.nxv2i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i16> @intrinsic_vssra_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv2i16_nxv2i16_nxv2i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16.nxv2i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %1,
+    <vscale x 2 x i16> %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16.nxv4i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  i64);
+
+define <vscale x 4 x i16> @intrinsic_vssra_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vv_nxv4i16_nxv4i16_nxv4i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16.nxv4i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %1,
+    i64 %2)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16.nxv4i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i16> @intrinsic_vssra_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv4i16_nxv4i16_nxv4i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16.nxv4i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %1,
+    <vscale x 4 x i16> %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16.nxv8i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  i64);
+
+define <vscale x 8 x i16> @intrinsic_vssra_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vv_nxv8i16_nxv8i16_nxv8i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16.nxv8i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %1,
+    i64 %2)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16.nxv8i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i16> @intrinsic_vssra_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv8i16_nxv8i16_nxv8i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16.nxv8i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %1,
+    <vscale x 8 x i16> %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16.nxv16i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  i64);
+
+define <vscale x 16 x i16> @intrinsic_vssra_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vv_nxv16i16_nxv16i16_nxv16i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16.nxv16i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %1,
+    i64 %2)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16.nxv16i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i16> @intrinsic_vssra_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv16i16_nxv16i16_nxv16i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16.nxv16i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %1,
+    <vscale x 16 x i16> %2,
+    <vscale x 16 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16.nxv32i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  i64);
+
+define <vscale x 32 x i16> @intrinsic_vssra_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vv_nxv32i16_nxv32i16_nxv32i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16.nxv32i16(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16> %1,
+    i64 %2)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.nxv32i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  <vscale x 32 x i1>,
+  i64);
+
+define <vscale x 32 x i16> @intrinsic_vssra_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv32i16_nxv32i16_nxv32i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.nxv32i16(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16> %1,
+    <vscale x 32 x i16> %2,
+    <vscale x 32 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  i64);
+
+define <vscale x 1 x i32> @intrinsic_vssra_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vv_nxv1i32_nxv1i32_nxv1i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.nxv1i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    i64 %2)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i32> @intrinsic_vssra_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv1i32_nxv1i32_nxv1i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.nxv1i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    <vscale x 1 x i32> %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32.nxv2i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  i64);
+
+define <vscale x 2 x i32> @intrinsic_vssra_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vv_nxv2i32_nxv2i32_nxv2i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32.nxv2i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    i64 %2)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32.nxv2i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i32> @intrinsic_vssra_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv2i32_nxv2i32_nxv2i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32.nxv2i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    <vscale x 2 x i32> %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  i64);
+
+define <vscale x 4 x i32> @intrinsic_vssra_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vv_nxv4i32_nxv4i32_nxv4i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.nxv4i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    i64 %2)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i32> @intrinsic_vssra_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv4i32_nxv4i32_nxv4i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32.nxv4i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    <vscale x 4 x i32> %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  i64);
+
+define <vscale x 8 x i32> @intrinsic_vssra_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vv_nxv8i32_nxv8i32_nxv8i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32.nxv8i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    i64 %2)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i32> @intrinsic_vssra_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv8i32_nxv8i32_nxv8i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32.nxv8i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    <vscale x 8 x i32> %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32.nxv16i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  i64);
+
+define <vscale x 16 x i32> @intrinsic_vssra_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vv_nxv16i32_nxv16i32_nxv16i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32.nxv16i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    i64 %2)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.nxv16i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i32> @intrinsic_vssra_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv16i32_nxv16i32_nxv16i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.nxv16i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    <vscale x 16 x i32> %2,
+    <vscale x 16 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64.nxv1i64(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>,
+  i64);
+
+define <vscale x 1 x i64> @intrinsic_vssra_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vv_nxv1i64_nxv1i64_nxv1i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64.nxv1i64(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64> %1,
+    i64 %2)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64.nxv1i64(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i64> @intrinsic_vssra_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv1i64_nxv1i64_nxv1i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64.nxv1i64(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64> %1,
+    <vscale x 1 x i64> %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64.nxv2i64(
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>,
+  i64);
+
+define <vscale x 2 x i64> @intrinsic_vssra_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vv_nxv2i64_nxv2i64_nxv2i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64.nxv2i64(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64> %1,
+    i64 %2)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64.nxv2i64(
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i64> @intrinsic_vssra_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv2i64_nxv2i64_nxv2i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64.nxv2i64(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64> %1,
+    <vscale x 2 x i64> %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64.nxv4i64(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  i64);
+
+define <vscale x 4 x i64> @intrinsic_vssra_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vv_nxv4i64_nxv4i64_nxv4i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64.nxv4i64(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64> %1,
+    i64 %2)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64.nxv4i64(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i64> @intrinsic_vssra_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv4i64_nxv4i64_nxv4i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64.nxv4i64(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64> %1,
+    <vscale x 4 x i64> %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64.nxv8i64(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>,
+  i64);
+
+define <vscale x 8 x i64> @intrinsic_vssra_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vv_nxv8i64_nxv8i64_nxv8i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64.nxv8i64(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i64> %1,
+    i64 %2)
+
+  ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64.nxv8i64(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i64> @intrinsic_vssra_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv8i64_nxv8i64_nxv8i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vssra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64.nxv8i64(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i64> %1,
+    <vscale x 8 x i64> %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.i8(
+  <vscale x 1 x i8>,
+  i8,
+  i64);
+
+define <vscale x 1 x i8> @intrinsic_vssra_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vx_nxv1i8_nxv1i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.i8(
+    <vscale x 1 x i8> %0,
+    i8 %1,
+    i64 %2)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  i8,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i8> @intrinsic_vssra_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv1i8_nxv1i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    i8 %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.i8(
+  <vscale x 2 x i8>,
+  i8,
+  i64);
+
+define <vscale x 2 x i8> @intrinsic_vssra_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vx_nxv2i8_nxv2i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.i8(
+    <vscale x 2 x i8> %0,
+    i8 %1,
+    i64 %2)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8.i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  i8,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i8> @intrinsic_vssra_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv2i8_nxv2i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8.i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    i8 %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8.i8(
+  <vscale x 4 x i8>,
+  i8,
+  i64);
+
+define <vscale x 4 x i8> @intrinsic_vssra_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vx_nxv4i8_nxv4i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8.i8(
+    <vscale x 4 x i8> %0,
+    i8 %1,
+    i64 %2)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8.i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  i8,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i8> @intrinsic_vssra_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv4i8_nxv4i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8.i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    i8 %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8.i8(
+  <vscale x 8 x i8>,
+  i8,
+  i64);
+
+define <vscale x 8 x i8> @intrinsic_vssra_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vx_nxv8i8_nxv8i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8.i8(
+    <vscale x 8 x i8> %0,
+    i8 %1,
+    i64 %2)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8.i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  i8,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i8> @intrinsic_vssra_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv8i8_nxv8i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8.i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    i8 %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8.i8(
+  <vscale x 16 x i8>,
+  i8,
+  i64);
+
+define <vscale x 16 x i8> @intrinsic_vssra_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vx_nxv16i8_nxv16i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8.i8(
+    <vscale x 16 x i8> %0,
+    i8 %1,
+    i64 %2)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8.i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  i8,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i8> @intrinsic_vssra_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv16i8_nxv16i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8.i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    i8 %2,
+    <vscale x 16 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.i8(
+  <vscale x 32 x i8>,
+  i8,
+  i64);
+
+define <vscale x 32 x i8> @intrinsic_vssra_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vx_nxv32i8_nxv32i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.i8(
+    <vscale x 32 x i8> %0,
+    i8 %1,
+    i64 %2)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8.i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  i8,
+  <vscale x 32 x i1>,
+  i64);
+
+define <vscale x 32 x i8> @intrinsic_vssra_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv32i8_nxv32i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8.i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8> %1,
+    i8 %2,
+    <vscale x 32 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8.i8(
+  <vscale x 64 x i8>,
+  i8,
+  i64);
+
+define <vscale x 64 x i8> @intrinsic_vssra_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vx_nxv64i8_nxv64i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8.i8(
+    <vscale x 64 x i8> %0,
+    i8 %1,
+    i64 %2)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.i8(
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  i8,
+  <vscale x 64 x i1>,
+  i64);
+
+define <vscale x 64 x i8> @intrinsic_vssra_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv64i8_nxv64i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.i8(
+    <vscale x 64 x i8> %0,
+    <vscale x 64 x i8> %1,
+    i8 %2,
+    <vscale x 64 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16.i16(
+  <vscale x 1 x i16>,
+  i16,
+  i64);
+
+define <vscale x 1 x i16> @intrinsic_vssra_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vx_nxv1i16_nxv1i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16.i16(
+    <vscale x 1 x i16> %0,
+    i16 %1,
+    i64 %2)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16.i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  i16,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i16> @intrinsic_vssra_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv1i16_nxv1i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16.i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    i16 %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16.i16(
+  <vscale x 2 x i16>,
+  i16,
+  i64);
+
+define <vscale x 2 x i16> @intrinsic_vssra_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vx_nxv2i16_nxv2i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16.i16(
+    <vscale x 2 x i16> %0,
+    i16 %1,
+    i64 %2)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16.i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  i16,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i16> @intrinsic_vssra_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv2i16_nxv2i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16.i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %1,
+    i16 %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16.i16(
+  <vscale x 4 x i16>,
+  i16,
+  i64);
+
+define <vscale x 4 x i16> @intrinsic_vssra_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vx_nxv4i16_nxv4i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16.i16(
+    <vscale x 4 x i16> %0,
+    i16 %1,
+    i64 %2)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16.i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  i16,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i16> @intrinsic_vssra_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv4i16_nxv4i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16.i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %1,
+    i16 %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16.i16(
+  <vscale x 8 x i16>,
+  i16,
+  i64);
+
+define <vscale x 8 x i16> @intrinsic_vssra_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vx_nxv8i16_nxv8i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16.i16(
+    <vscale x 8 x i16> %0,
+    i16 %1,
+    i64 %2)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16.i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  i16,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i16> @intrinsic_vssra_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv8i16_nxv8i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16.i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %1,
+    i16 %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16.i16(
+  <vscale x 16 x i16>,
+  i16,
+  i64);
+
+define <vscale x 16 x i16> @intrinsic_vssra_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vx_nxv16i16_nxv16i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16.i16(
+    <vscale x 16 x i16> %0,
+    i16 %1,
+    i64 %2)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16.i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  i16,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i16> @intrinsic_vssra_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv16i16_nxv16i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16.i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %1,
+    i16 %2,
+    <vscale x 16 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16.i16(
+  <vscale x 32 x i16>,
+  i16,
+  i64);
+
+define <vscale x 32 x i16> @intrinsic_vssra_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vx_nxv32i16_nxv32i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16.i16(
+    <vscale x 32 x i16> %0,
+    i16 %1,
+    i64 %2)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  i16,
+  <vscale x 32 x i1>,
+  i64);
+
+define <vscale x 32 x i16> @intrinsic_vssra_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv32i16_nxv32i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.i16(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16> %1,
+    i16 %2,
+    <vscale x 32 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.i32(
+  <vscale x 1 x i32>,
+  i32,
+  i64);
+
+define <vscale x 1 x i32> @intrinsic_vssra_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vx_nxv1i32_nxv1i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.i32(
+    <vscale x 1 x i32> %0,
+    i32 %1,
+    i64 %2)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  i32,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i32> @intrinsic_vssra_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv1i32_nxv1i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    i32 %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32.i32(
+  <vscale x 2 x i32>,
+  i32,
+  i64);
+
+define <vscale x 2 x i32> @intrinsic_vssra_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vx_nxv2i32_nxv2i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32.i32(
+    <vscale x 2 x i32> %0,
+    i32 %1,
+    i64 %2)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32.i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  i32,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i32> @intrinsic_vssra_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv2i32_nxv2i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32.i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    i32 %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.i32(
+  <vscale x 4 x i32>,
+  i32,
+  i64);
+
+define <vscale x 4 x i32> @intrinsic_vssra_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vx_nxv4i32_nxv4i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.i32(
+    <vscale x 4 x i32> %0,
+    i32 %1,
+    i64 %2)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32.i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  i32,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i32> @intrinsic_vssra_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv4i32_nxv4i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32.i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    i32 %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32.i32(
+  <vscale x 8 x i32>,
+  i32,
+  i64);
+
+define <vscale x 8 x i32> @intrinsic_vssra_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vx_nxv8i32_nxv8i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32.i32(
+    <vscale x 8 x i32> %0,
+    i32 %1,
+    i64 %2)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32.i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  i32,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i32> @intrinsic_vssra_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv8i32_nxv8i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32.i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    i32 %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32.i32(
+  <vscale x 16 x i32>,
+  i32,
+  i64);
+
+define <vscale x 16 x i32> @intrinsic_vssra_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vx_nxv16i32_nxv16i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32.i32(
+    <vscale x 16 x i32> %0,
+    i32 %1,
+    i64 %2)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  i32,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i32> @intrinsic_vssra_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv16i32_nxv16i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    i32 %2,
+    <vscale x 16 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64.i64(
+  <vscale x 1 x i64>,
+  i64,
+  i64);
+
+define <vscale x 1 x i64> @intrinsic_vssra_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vx_nxv1i64_nxv1i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64.i64(
+    <vscale x 1 x i64> %0,
+    i64 %1,
+    i64 %2)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64.i64(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>,
+  i64,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i64> @intrinsic_vssra_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv1i64_nxv1i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64.i64(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64> %1,
+    i64 %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64.i64(
+  <vscale x 2 x i64>,
+  i64,
+  i64);
+
+define <vscale x 2 x i64> @intrinsic_vssra_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vx_nxv2i64_nxv2i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64.i64(
+    <vscale x 2 x i64> %0,
+    i64 %1,
+    i64 %2)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64.i64(
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>,
+  i64,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i64> @intrinsic_vssra_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv2i64_nxv2i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64.i64(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64> %1,
+    i64 %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64.i64(
+  <vscale x 4 x i64>,
+  i64,
+  i64);
+
+define <vscale x 4 x i64> @intrinsic_vssra_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vx_nxv4i64_nxv4i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64.i64(
+    <vscale x 4 x i64> %0,
+    i64 %1,
+    i64 %2)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64.i64(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  i64,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i64> @intrinsic_vssra_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv4i64_nxv4i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64.i64(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64> %1,
+    i64 %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64.i64(
+  <vscale x 8 x i64>,
+  i64,
+  i64);
+
+define <vscale x 8 x i64> @intrinsic_vssra_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vx_nxv8i64_nxv8i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64.i64(
+    <vscale x 8 x i64> %0,
+    i64 %1,
+    i64 %2)
+
+  ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64.i64(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>,
+  i64,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i64> @intrinsic_vssra_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv8i64_nxv8i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vssra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64.i64(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i64> %1,
+    i64 %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i64> %a
+}
+
+define <vscale x 1 x i8> @intrinsic_vssra_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vi_nxv1i8_nxv1i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.i8(
+    <vscale x 1 x i8> %0,
+    i8 9,
+    i64 %1)
+
+  ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 1 x i8> @intrinsic_vssra_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv1i8_nxv1i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    i8 9,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_vssra_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vi_nxv2i8_nxv2i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.i8(
+    <vscale x 2 x i8> %0,
+    i8 9,
+    i64 %1)
+
+  ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_vssra_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv2i8_nxv2i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8.i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    i8 9,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_vssra_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vi_nxv4i8_nxv4i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8.i8(
+    <vscale x 4 x i8> %0,
+    i8 9,
+    i64 %1)
+
+  ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_vssra_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv4i8_nxv4i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8.i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    i8 9,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vssra_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vi_nxv8i8_nxv8i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8.i8(
+    <vscale x 8 x i8> %0,
+    i8 9,
+    i64 %1)
+
+  ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vssra_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv8i8_nxv8i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8.i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    i8 9,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_vssra_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vi_nxv16i8_nxv16i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8.i8(
+    <vscale x 16 x i8> %0,
+    i8 9,
+    i64 %1)
+
+  ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_vssra_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv16i8_nxv16i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8.i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    i8 9,
+    <vscale x 16 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 32 x i8> @intrinsic_vssra_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vi_nxv32i8_nxv32i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.i8(
+    <vscale x 32 x i8> %0,
+    i8 9,
+    i64 %1)
+
+  ret <vscale x 32 x i8> %a
+}
+
+define <vscale x 32 x i8> @intrinsic_vssra_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv32i8_nxv32i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8.i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8> %1,
+    i8 9,
+    <vscale x 32 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 32 x i8> %a
+}
+
+define <vscale x 64 x i8> @intrinsic_vssra_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vi_nxv64i8_nxv64i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8.i8(
+    <vscale x 64 x i8> %0,
+    i8 9,
+    i64 %1)
+
+  ret <vscale x 64 x i8> %a
+}
+
+define <vscale x 64 x i8> @intrinsic_vssra_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv64i8_nxv64i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.i8(
+    <vscale x 64 x i8> %0,
+    <vscale x 64 x i8> %1,
+    i8 9,
+    <vscale x 64 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 64 x i8> %a
+}
+
+define <vscale x 1 x i16> @intrinsic_vssra_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vi_nxv1i16_nxv1i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16.i16(
+    <vscale x 1 x i16> %0,
+    i16 9,
+    i64 %1)
+
+  ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 1 x i16> @intrinsic_vssra_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv1i16_nxv1i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16.i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    i16 9,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 2 x i16> @intrinsic_vssra_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vi_nxv2i16_nxv2i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16.i16(
+    <vscale x 2 x i16> %0,
+    i16 9,
+    i64 %1)
+
+  ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 2 x i16> @intrinsic_vssra_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv2i16_nxv2i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16.i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %1,
+    i16 9,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 4 x i16> @intrinsic_vssra_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vi_nxv4i16_nxv4i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16.i16(
+    <vscale x 4 x i16> %0,
+    i16 9,
+    i64 %1)
+
+  ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 4 x i16> @intrinsic_vssra_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv4i16_nxv4i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16.i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %1,
+    i16 9,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 8 x i16> @intrinsic_vssra_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vi_nxv8i16_nxv8i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16.i16(
+    <vscale x 8 x i16> %0,
+    i16 9,
+    i64 %1)
+
+  ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 8 x i16> @intrinsic_vssra_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv8i16_nxv8i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16.i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %1,
+    i16 9,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 16 x i16> @intrinsic_vssra_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vi_nxv16i16_nxv16i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16.i16(
+    <vscale x 16 x i16> %0,
+    i16 9,
+    i64 %1)
+
+  ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 16 x i16> @intrinsic_vssra_mask_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv16i16_nxv16i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16.i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %1,
+    i16 9,
+    <vscale x 16 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 32 x i16> @intrinsic_vssra_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vi_nxv32i16_nxv32i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16.i16(
+    <vscale x 32 x i16> %0,
+    i16 9,
+    i64 %1)
+
+  ret <vscale x 32 x i16> %a
+}
+
+define <vscale x 32 x i16> @intrinsic_vssra_mask_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv32i16_nxv32i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.i16(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16> %1,
+    i16 9,
+    <vscale x 32 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 32 x i16> %a
+}
+
+define <vscale x 1 x i32> @intrinsic_vssra_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vi_nxv1i32_nxv1i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.i32(
+    <vscale x 1 x i32> %0,
+    i32 9,
+    i64 %1)
+
+  ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 1 x i32> @intrinsic_vssra_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv1i32_nxv1i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    i32 9,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 2 x i32> @intrinsic_vssra_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vi_nxv2i32_nxv2i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32.i32(
+    <vscale x 2 x i32> %0,
+    i32 9,
+    i64 %1)
+
+  ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 2 x i32> @intrinsic_vssra_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv2i32_nxv2i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32.i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    i32 9,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 4 x i32> @intrinsic_vssra_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vi_nxv4i32_nxv4i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.i32(
+    <vscale x 4 x i32> %0,
+    i32 9,
+    i64 %1)
+
+  ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 4 x i32> @intrinsic_vssra_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv4i32_nxv4i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32.i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    i32 9,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 8 x i32> @intrinsic_vssra_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vi_nxv8i32_nxv8i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32.i32(
+    <vscale x 8 x i32> %0,
+    i32 9,
+    i64 %1)
+
+  ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 8 x i32> @intrinsic_vssra_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv8i32_nxv8i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32.i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    i32 9,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 16 x i32> @intrinsic_vssra_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vi_nxv16i32_nxv16i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32.i32(
+    <vscale x 16 x i32> %0,
+    i32 9,
+    i64 %1)
+
+  ret <vscale x 16 x i32> %a
+}
+
+define <vscale x 16 x i32> @intrinsic_vssra_mask_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv16i32_nxv16i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    i32 9,
+    <vscale x 16 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 16 x i32> %a
+}
+
+define <vscale x 1 x i64> @intrinsic_vssra_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vi_nxv1i64_nxv1i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64.i64(
+    <vscale x 1 x i64> %0,
+    i64 9,
+    i64 %1)
+
+  ret <vscale x 1 x i64> %a
+}
+
+define <vscale x 1 x i64> @intrinsic_vssra_mask_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv1i64_nxv1i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64.i64(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64> %1,
+    i64 9,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i64> %a
+}
+
+define <vscale x 2 x i64> @intrinsic_vssra_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vi_nxv2i64_nxv2i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64.i64(
+    <vscale x 2 x i64> %0,
+    i64 9,
+    i64 %1)
+
+  ret <vscale x 2 x i64> %a
+}
+
+define <vscale x 2 x i64> @intrinsic_vssra_mask_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv2i64_nxv2i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64.i64(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64> %1,
+    i64 9,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i64> %a
+}
+
+define <vscale x 4 x i64> @intrinsic_vssra_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vi_nxv4i64_nxv4i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64.i64(
+    <vscale x 4 x i64> %0,
+    i64 9,
+    i64 %1)
+
+  ret <vscale x 4 x i64> %a
+}
+
+define <vscale x 4 x i64> @intrinsic_vssra_mask_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv4i64_nxv4i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64.i64(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64> %1,
+    i64 9,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i64> %a
+}
+
+define <vscale x 8 x i64> @intrinsic_vssra_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_vi_nxv8i64_nxv8i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64.i64(
+    <vscale x 8 x i64> %0,
+    i64 9,
+    i64 %1)
+
+  ret <vscale x 8 x i64> %a
+}
+
+define <vscale x 8 x i64> @intrinsic_vssra_mask_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv8i64_nxv8i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vssra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64.i64(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i64> %1,
+    i64 9,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i64> %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vssrl-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vssrl-rv32.ll
new file mode 100644
index 000000000000..aa69e0d2cea4
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vssrl-rv32.ll
@@ -0,0 +1,1945 @@
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  i32);
+
+define <vscale x 1 x i8> @intrinsic_vssrl_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vv_nxv1i8_nxv1i8_nxv1i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i8> @intrinsic_vssrl_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv1i8_nxv1i8_nxv1i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    <vscale x 1 x i8> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  i32);
+
+define <vscale x 2 x i8> @intrinsic_vssrl_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vv_nxv2i8_nxv2i8_nxv2i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i8> @intrinsic_vssrl_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv2i8_nxv2i8_nxv2i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    <vscale x 2 x i8> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  i32);
+
+define <vscale x 4 x i8> @intrinsic_vssrl_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vv_nxv4i8_nxv4i8_nxv4i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i8> @intrinsic_vssrl_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv4i8_nxv4i8_nxv4i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    <vscale x 4 x i8> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  i32);
+
+define <vscale x 8 x i8> @intrinsic_vssrl_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vv_nxv8i8_nxv8i8_nxv8i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i8> @intrinsic_vssrl_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv8i8_nxv8i8_nxv8i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    <vscale x 8 x i8> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8.nxv16i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  i32);
+
+define <vscale x 16 x i8> @intrinsic_vssrl_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vv_nxv16i8_nxv16i8_nxv16i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8.nxv16i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8.nxv16i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i8> @intrinsic_vssrl_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv16i8_nxv16i8_nxv16i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8.nxv16i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    <vscale x 16 x i8> %2,
+    <vscale x 16 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8.nxv32i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  i32);
+
+define <vscale x 32 x i8> @intrinsic_vssrl_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vv_nxv32i8_nxv32i8_nxv32i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8.nxv32i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8.nxv32i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  <vscale x 32 x i1>,
+  i32);
+
+define <vscale x 32 x i8> @intrinsic_vssrl_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv32i8_nxv32i8_nxv32i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8.nxv32i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8> %1,
+    <vscale x 32 x i8> %2,
+    <vscale x 32 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8.nxv64i8(
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  i32);
+
+define <vscale x 64 x i8> @intrinsic_vssrl_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vv_nxv64i8_nxv64i8_nxv64i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8.nxv64i8(
+    <vscale x 64 x i8> %0,
+    <vscale x 64 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8.nxv64i8(
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  <vscale x 64 x i1>,
+  i32);
+
+define <vscale x 64 x i8> @intrinsic_vssrl_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv64i8_nxv64i8_nxv64i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8.nxv64i8(
+    <vscale x 64 x i8> %0,
+    <vscale x 64 x i8> %1,
+    <vscale x 64 x i8> %2,
+    <vscale x 64 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  i32);
+
+define <vscale x 1 x i16> @intrinsic_vssrl_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vv_nxv1i16_nxv1i16_nxv1i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16.nxv1i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    i32 %2)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i16> @intrinsic_vssrl_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv1i16_nxv1i16_nxv1i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16.nxv1i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    <vscale x 1 x i16> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16.nxv2i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  i32);
+
+define <vscale x 2 x i16> @intrinsic_vssrl_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vv_nxv2i16_nxv2i16_nxv2i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16.nxv2i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %1,
+    i32 %2)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16.nxv2i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i16> @intrinsic_vssrl_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv2i16_nxv2i16_nxv2i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16.nxv2i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %1,
+    <vscale x 2 x i16> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16.nxv4i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  i32);
+
+define <vscale x 4 x i16> @intrinsic_vssrl_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vv_nxv4i16_nxv4i16_nxv4i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16.nxv4i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %1,
+    i32 %2)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16.nxv4i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i16> @intrinsic_vssrl_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv4i16_nxv4i16_nxv4i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16.nxv4i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %1,
+    <vscale x 4 x i16> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16.nxv8i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  i32);
+
+define <vscale x 8 x i16> @intrinsic_vssrl_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vv_nxv8i16_nxv8i16_nxv8i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16.nxv8i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %1,
+    i32 %2)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16.nxv8i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i16> @intrinsic_vssrl_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv8i16_nxv8i16_nxv8i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16.nxv8i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %1,
+    <vscale x 8 x i16> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16.nxv16i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  i32);
+
+define <vscale x 16 x i16> @intrinsic_vssrl_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vv_nxv16i16_nxv16i16_nxv16i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16.nxv16i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %1,
+    i32 %2)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16.nxv16i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i16> @intrinsic_vssrl_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv16i16_nxv16i16_nxv16i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16.nxv16i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %1,
+    <vscale x 16 x i16> %2,
+    <vscale x 16 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16.nxv32i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  i32);
+
+define <vscale x 32 x i16> @intrinsic_vssrl_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vv_nxv32i16_nxv32i16_nxv32i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16.nxv32i16(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16> %1,
+    i32 %2)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16.nxv32i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  <vscale x 32 x i1>,
+  i32);
+
+define <vscale x 32 x i16> @intrinsic_vssrl_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv32i16_nxv32i16_nxv32i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16.nxv32i16(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16> %1,
+    <vscale x 32 x i16> %2,
+    <vscale x 32 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  i32);
+
+define <vscale x 1 x i32> @intrinsic_vssrl_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vv_nxv1i32_nxv1i32_nxv1i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.nxv1i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    i32 %2)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i32> @intrinsic_vssrl_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv1i32_nxv1i32_nxv1i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    <vscale x 1 x i32> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32.nxv2i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  i32);
+
+define <vscale x 2 x i32> @intrinsic_vssrl_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vv_nxv2i32_nxv2i32_nxv2i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32.nxv2i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    i32 %2)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32.nxv2i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i32> @intrinsic_vssrl_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv2i32_nxv2i32_nxv2i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32.nxv2i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    <vscale x 2 x i32> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  i32);
+
+define <vscale x 4 x i32> @intrinsic_vssrl_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vv_nxv4i32_nxv4i32_nxv4i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32.nxv4i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    i32 %2)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i32> @intrinsic_vssrl_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv4i32_nxv4i32_nxv4i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32.nxv4i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    <vscale x 4 x i32> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  i32);
+
+define <vscale x 8 x i32> @intrinsic_vssrl_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vv_nxv8i32_nxv8i32_nxv8i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32.nxv8i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    i32 %2)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i32> @intrinsic_vssrl_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv8i32_nxv8i32_nxv8i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32.nxv8i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    <vscale x 8 x i32> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32.nxv16i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  i32);
+
+define <vscale x 16 x i32> @intrinsic_vssrl_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vv_nxv16i32_nxv16i32_nxv16i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32.nxv16i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    i32 %2)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32.nxv16i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i32> @intrinsic_vssrl_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv16i32_nxv16i32_nxv16i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32.nxv16i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    <vscale x 16 x i32> %2,
+    <vscale x 16 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.i8(
+  <vscale x 1 x i8>,
+  i8,
+  i32);
+
+define <vscale x 1 x i8> @intrinsic_vssrl_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vx_nxv1i8_nxv1i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.i8(
+    <vscale x 1 x i8> %0,
+    i8 %1,
+    i32 %2)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  i8,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i8> @intrinsic_vssrl_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv1i8_nxv1i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    i8 %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8.i8(
+  <vscale x 2 x i8>,
+  i8,
+  i32);
+
+define <vscale x 2 x i8> @intrinsic_vssrl_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vx_nxv2i8_nxv2i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8.i8(
+    <vscale x 2 x i8> %0,
+    i8 %1,
+    i32 %2)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8.i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  i8,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i8> @intrinsic_vssrl_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv2i8_nxv2i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8.i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    i8 %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8.i8(
+  <vscale x 4 x i8>,
+  i8,
+  i32);
+
+define <vscale x 4 x i8> @intrinsic_vssrl_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vx_nxv4i8_nxv4i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8.i8(
+    <vscale x 4 x i8> %0,
+    i8 %1,
+    i32 %2)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8.i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  i8,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i8> @intrinsic_vssrl_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv4i8_nxv4i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8.i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    i8 %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8.i8(
+  <vscale x 8 x i8>,
+  i8,
+  i32);
+
+define <vscale x 8 x i8> @intrinsic_vssrl_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vx_nxv8i8_nxv8i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8.i8(
+    <vscale x 8 x i8> %0,
+    i8 %1,
+    i32 %2)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8.i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  i8,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i8> @intrinsic_vssrl_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv8i8_nxv8i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8.i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    i8 %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8.i8(
+  <vscale x 16 x i8>,
+  i8,
+  i32);
+
+define <vscale x 16 x i8> @intrinsic_vssrl_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vx_nxv16i8_nxv16i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8.i8(
+    <vscale x 16 x i8> %0,
+    i8 %1,
+    i32 %2)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8.i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  i8,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i8> @intrinsic_vssrl_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv16i8_nxv16i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8.i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    i8 %2,
+    <vscale x 16 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8.i8(
+  <vscale x 32 x i8>,
+  i8,
+  i32);
+
+define <vscale x 32 x i8> @intrinsic_vssrl_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vx_nxv32i8_nxv32i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8.i8(
+    <vscale x 32 x i8> %0,
+    i8 %1,
+    i32 %2)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8.i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  i8,
+  <vscale x 32 x i1>,
+  i32);
+
+define <vscale x 32 x i8> @intrinsic_vssrl_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv32i8_nxv32i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8.i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8> %1,
+    i8 %2,
+    <vscale x 32 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8.i8(
+  <vscale x 64 x i8>,
+  i8,
+  i32);
+
+define <vscale x 64 x i8> @intrinsic_vssrl_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vx_nxv64i8_nxv64i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8.i8(
+    <vscale x 64 x i8> %0,
+    i8 %1,
+    i32 %2)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8.i8(
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  i8,
+  <vscale x 64 x i1>,
+  i32);
+
+define <vscale x 64 x i8> @intrinsic_vssrl_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv64i8_nxv64i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8.i8(
+    <vscale x 64 x i8> %0,
+    <vscale x 64 x i8> %1,
+    i8 %2,
+    <vscale x 64 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16.i16(
+  <vscale x 1 x i16>,
+  i16,
+  i32);
+
+define <vscale x 1 x i16> @intrinsic_vssrl_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vx_nxv1i16_nxv1i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16.i16(
+    <vscale x 1 x i16> %0,
+    i16 %1,
+    i32 %2)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16.i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  i16,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i16> @intrinsic_vssrl_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv1i16_nxv1i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16.i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    i16 %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16.i16(
+  <vscale x 2 x i16>,
+  i16,
+  i32);
+
+define <vscale x 2 x i16> @intrinsic_vssrl_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vx_nxv2i16_nxv2i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16.i16(
+    <vscale x 2 x i16> %0,
+    i16 %1,
+    i32 %2)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16.i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  i16,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i16> @intrinsic_vssrl_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv2i16_nxv2i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16.i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %1,
+    i16 %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16.i16(
+  <vscale x 4 x i16>,
+  i16,
+  i32);
+
+define <vscale x 4 x i16> @intrinsic_vssrl_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vx_nxv4i16_nxv4i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16.i16(
+    <vscale x 4 x i16> %0,
+    i16 %1,
+    i32 %2)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16.i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  i16,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i16> @intrinsic_vssrl_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv4i16_nxv4i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16.i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %1,
+    i16 %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16.i16(
+  <vscale x 8 x i16>,
+  i16,
+  i32);
+
+define <vscale x 8 x i16> @intrinsic_vssrl_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vx_nxv8i16_nxv8i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16.i16(
+    <vscale x 8 x i16> %0,
+    i16 %1,
+    i32 %2)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16.i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  i16,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i16> @intrinsic_vssrl_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv8i16_nxv8i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16.i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %1,
+    i16 %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16.i16(
+  <vscale x 16 x i16>,
+  i16,
+  i32);
+
+define <vscale x 16 x i16> @intrinsic_vssrl_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vx_nxv16i16_nxv16i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16.i16(
+    <vscale x 16 x i16> %0,
+    i16 %1,
+    i32 %2)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16.i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  i16,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i16> @intrinsic_vssrl_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv16i16_nxv16i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16.i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %1,
+    i16 %2,
+    <vscale x 16 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16.i16(
+  <vscale x 32 x i16>,
+  i16,
+  i32);
+
+define <vscale x 32 x i16> @intrinsic_vssrl_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vx_nxv32i16_nxv32i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16.i16(
+    <vscale x 32 x i16> %0,
+    i16 %1,
+    i32 %2)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16.i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  i16,
+  <vscale x 32 x i1>,
+  i32);
+
+define <vscale x 32 x i16> @intrinsic_vssrl_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv32i16_nxv32i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16.i16(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16> %1,
+    i16 %2,
+    <vscale x 32 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.i32(
+  <vscale x 1 x i32>,
+  i32,
+  i32);
+
+define <vscale x 1 x i32> @intrinsic_vssrl_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vx_nxv1i32_nxv1i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.i32(
+    <vscale x 1 x i32> %0,
+    i32 %1,
+    i32 %2)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  i32,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i32> @intrinsic_vssrl_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv1i32_nxv1i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    i32 %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32.i32(
+  <vscale x 2 x i32>,
+  i32,
+  i32);
+
+define <vscale x 2 x i32> @intrinsic_vssrl_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vx_nxv2i32_nxv2i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32.i32(
+    <vscale x 2 x i32> %0,
+    i32 %1,
+    i32 %2)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32.i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  i32,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i32> @intrinsic_vssrl_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv2i32_nxv2i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32.i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    i32 %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32.i32(
+  <vscale x 4 x i32>,
+  i32,
+  i32);
+
+define <vscale x 4 x i32> @intrinsic_vssrl_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vx_nxv4i32_nxv4i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32.i32(
+    <vscale x 4 x i32> %0,
+    i32 %1,
+    i32 %2)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32.i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  i32,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i32> @intrinsic_vssrl_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv4i32_nxv4i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32.i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    i32 %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32.i32(
+  <vscale x 8 x i32>,
+  i32,
+  i32);
+
+define <vscale x 8 x i32> @intrinsic_vssrl_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vx_nxv8i32_nxv8i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32.i32(
+    <vscale x 8 x i32> %0,
+    i32 %1,
+    i32 %2)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32.i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  i32,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i32> @intrinsic_vssrl_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv8i32_nxv8i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32.i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    i32 %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32.i32(
+  <vscale x 16 x i32>,
+  i32,
+  i32);
+
+define <vscale x 16 x i32> @intrinsic_vssrl_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vx_nxv16i32_nxv16i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32.i32(
+    <vscale x 16 x i32> %0,
+    i32 %1,
+    i32 %2)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32.i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  i32,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i32> @intrinsic_vssrl_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv16i32_nxv16i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32.i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    i32 %2,
+    <vscale x 16 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 16 x i32> %a
+}
+
+define <vscale x 1 x i8> @intrinsic_vssrl_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vi_nxv1i8_nxv1i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.i8(
+    <vscale x 1 x i8> %0,
+    i8 9,
+    i32 %1)
+
+  ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 1 x i8> @intrinsic_vssrl_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv1i8_nxv1i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    i8 9,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_vssrl_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vi_nxv2i8_nxv2i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8.i8(
+    <vscale x 2 x i8> %0,
+    i8 9,
+    i32 %1)
+
+  ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_vssrl_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv2i8_nxv2i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8.i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    i8 9,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_vssrl_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vi_nxv4i8_nxv4i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8.i8(
+    <vscale x 4 x i8> %0,
+    i8 9,
+    i32 %1)
+
+  ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_vssrl_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv4i8_nxv4i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8.i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    i8 9,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vssrl_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vi_nxv8i8_nxv8i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8.i8(
+    <vscale x 8 x i8> %0,
+    i8 9,
+    i32 %1)
+
+  ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vssrl_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv8i8_nxv8i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8.i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    i8 9,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_vssrl_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vi_nxv16i8_nxv16i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8.i8(
+    <vscale x 16 x i8> %0,
+    i8 9,
+    i32 %1)
+
+  ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_vssrl_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv16i8_nxv16i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8.i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    i8 9,
+    <vscale x 16 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 32 x i8> @intrinsic_vssrl_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vi_nxv32i8_nxv32i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8.i8(
+    <vscale x 32 x i8> %0,
+    i8 9,
+    i32 %1)
+
+  ret <vscale x 32 x i8> %a
+}
+
+define <vscale x 32 x i8> @intrinsic_vssrl_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv32i8_nxv32i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8.i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8> %1,
+    i8 9,
+    <vscale x 32 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 32 x i8> %a
+}
+
+define <vscale x 64 x i8> @intrinsic_vssrl_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vi_nxv64i8_nxv64i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8.i8(
+    <vscale x 64 x i8> %0,
+    i8 9,
+    i32 %1)
+
+  ret <vscale x 64 x i8> %a
+}
+
+define <vscale x 64 x i8> @intrinsic_vssrl_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv64i8_nxv64i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8.i8(
+    <vscale x 64 x i8> %0,
+    <vscale x 64 x i8> %1,
+    i8 9,
+    <vscale x 64 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 64 x i8> %a
+}
+
+define <vscale x 1 x i16> @intrinsic_vssrl_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vi_nxv1i16_nxv1i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16.i16(
+    <vscale x 1 x i16> %0,
+    i16 9,
+    i32 %1)
+
+  ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 1 x i16> @intrinsic_vssrl_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv1i16_nxv1i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16.i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    i16 9,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 2 x i16> @intrinsic_vssrl_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vi_nxv2i16_nxv2i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16.i16(
+    <vscale x 2 x i16> %0,
+    i16 9,
+    i32 %1)
+
+  ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 2 x i16> @intrinsic_vssrl_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv2i16_nxv2i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16.i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %1,
+    i16 9,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 4 x i16> @intrinsic_vssrl_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vi_nxv4i16_nxv4i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16.i16(
+    <vscale x 4 x i16> %0,
+    i16 9,
+    i32 %1)
+
+  ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 4 x i16> @intrinsic_vssrl_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv4i16_nxv4i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16.i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %1,
+    i16 9,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 8 x i16> @intrinsic_vssrl_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vi_nxv8i16_nxv8i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16.i16(
+    <vscale x 8 x i16> %0,
+    i16 9,
+    i32 %1)
+
+  ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 8 x i16> @intrinsic_vssrl_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv8i16_nxv8i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16.i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %1,
+    i16 9,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 16 x i16> @intrinsic_vssrl_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vi_nxv16i16_nxv16i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16.i16(
+    <vscale x 16 x i16> %0,
+    i16 9,
+    i32 %1)
+
+  ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 16 x i16> @intrinsic_vssrl_mask_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv16i16_nxv16i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16.i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %1,
+    i16 9,
+    <vscale x 16 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 32 x i16> @intrinsic_vssrl_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vi_nxv32i16_nxv32i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16.i16(
+    <vscale x 32 x i16> %0,
+    i16 9,
+    i32 %1)
+
+  ret <vscale x 32 x i16> %a
+}
+
+define <vscale x 32 x i16> @intrinsic_vssrl_mask_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv32i16_nxv32i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16.i16(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16> %1,
+    i16 9,
+    <vscale x 32 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 32 x i16> %a
+}
+
+define <vscale x 1 x i32> @intrinsic_vssrl_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vi_nxv1i32_nxv1i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.i32(
+    <vscale x 1 x i32> %0,
+    i32 9,
+    i32 %1)
+
+  ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 1 x i32> @intrinsic_vssrl_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv1i32_nxv1i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    i32 9,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 2 x i32> @intrinsic_vssrl_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vi_nxv2i32_nxv2i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32.i32(
+    <vscale x 2 x i32> %0,
+    i32 9,
+    i32 %1)
+
+  ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 2 x i32> @intrinsic_vssrl_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv2i32_nxv2i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32.i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    i32 9,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 4 x i32> @intrinsic_vssrl_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vi_nxv4i32_nxv4i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32.i32(
+    <vscale x 4 x i32> %0,
+    i32 9,
+    i32 %1)
+
+  ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 4 x i32> @intrinsic_vssrl_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv4i32_nxv4i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32.i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    i32 9,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 8 x i32> @intrinsic_vssrl_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vi_nxv8i32_nxv8i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32.i32(
+    <vscale x 8 x i32> %0,
+    i32 9,
+    i32 %1)
+
+  ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 8 x i32> @intrinsic_vssrl_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv8i32_nxv8i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32.i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    i32 9,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 16 x i32> @intrinsic_vssrl_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vi_nxv16i32_nxv16i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32.i32(
+    <vscale x 16 x i32> %0,
+    i32 9,
+    i32 %1)
+
+  ret <vscale x 16 x i32> %a
+}
+
+define <vscale x 16 x i32> @intrinsic_vssrl_mask_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv16i32_nxv16i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32.i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    i32 9,
+    <vscale x 16 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 16 x i32> %a
+}

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vssrl-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vssrl-rv64.ll
new file mode 100644
index 000000000000..bf797f6d38ab
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vssrl-rv64.ll
@@ -0,0 +1,2377 @@
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  i64);
+
+define <vscale x 1 x i8> @intrinsic_vssrl_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vv_nxv1i8_nxv1i8_nxv1i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i8> @intrinsic_vssrl_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv1i8_nxv1i8_nxv1i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    <vscale x 1 x i8> %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  i64);
+
+define <vscale x 2 x i8> @intrinsic_vssrl_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vv_nxv2i8_nxv2i8_nxv2i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i8> @intrinsic_vssrl_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv2i8_nxv2i8_nxv2i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    <vscale x 2 x i8> %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  i64);
+
+define <vscale x 4 x i8> @intrinsic_vssrl_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vv_nxv4i8_nxv4i8_nxv4i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i8> @intrinsic_vssrl_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv4i8_nxv4i8_nxv4i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    <vscale x 4 x i8> %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  i64);
+
+define <vscale x 8 x i8> @intrinsic_vssrl_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vv_nxv8i8_nxv8i8_nxv8i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i8> @intrinsic_vssrl_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv8i8_nxv8i8_nxv8i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    <vscale x 8 x i8> %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8.nxv16i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  i64);
+
+define <vscale x 16 x i8> @intrinsic_vssrl_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vv_nxv16i8_nxv16i8_nxv16i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8.nxv16i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8.nxv16i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i8> @intrinsic_vssrl_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv16i8_nxv16i8_nxv16i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8.nxv16i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    <vscale x 16 x i8> %2,
+    <vscale x 16 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8.nxv32i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  i64);
+
+define <vscale x 32 x i8> @intrinsic_vssrl_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vv_nxv32i8_nxv32i8_nxv32i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8.nxv32i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8.nxv32i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  <vscale x 32 x i1>,
+  i64);
+
+define <vscale x 32 x i8> @intrinsic_vssrl_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv32i8_nxv32i8_nxv32i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8.nxv32i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8> %1,
+    <vscale x 32 x i8> %2,
+    <vscale x 32 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8.nxv64i8(
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  i64);
+
+define <vscale x 64 x i8> @intrinsic_vssrl_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vv_nxv64i8_nxv64i8_nxv64i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8.nxv64i8(
+    <vscale x 64 x i8> %0,
+    <vscale x 64 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8.nxv64i8(
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  <vscale x 64 x i1>,
+  i64);
+
+define <vscale x 64 x i8> @intrinsic_vssrl_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv64i8_nxv64i8_nxv64i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8.nxv64i8(
+    <vscale x 64 x i8> %0,
+    <vscale x 64 x i8> %1,
+    <vscale x 64 x i8> %2,
+    <vscale x 64 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  i64);
+
+define <vscale x 1 x i16> @intrinsic_vssrl_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vv_nxv1i16_nxv1i16_nxv1i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16.nxv1i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    i64 %2)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i16> @intrinsic_vssrl_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv1i16_nxv1i16_nxv1i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16.nxv1i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    <vscale x 1 x i16> %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16.nxv2i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  i64);
+
+define <vscale x 2 x i16> @intrinsic_vssrl_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vv_nxv2i16_nxv2i16_nxv2i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16.nxv2i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %1,
+    i64 %2)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16.nxv2i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i16> @intrinsic_vssrl_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv2i16_nxv2i16_nxv2i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16.nxv2i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %1,
+    <vscale x 2 x i16> %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16.nxv4i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  i64);
+
+define <vscale x 4 x i16> @intrinsic_vssrl_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vv_nxv4i16_nxv4i16_nxv4i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16.nxv4i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %1,
+    i64 %2)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16.nxv4i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i16> @intrinsic_vssrl_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv4i16_nxv4i16_nxv4i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16.nxv4i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %1,
+    <vscale x 4 x i16> %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16.nxv8i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  i64);
+
+define <vscale x 8 x i16> @intrinsic_vssrl_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vv_nxv8i16_nxv8i16_nxv8i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16.nxv8i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %1,
+    i64 %2)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16.nxv8i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i16> @intrinsic_vssrl_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv8i16_nxv8i16_nxv8i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16.nxv8i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %1,
+    <vscale x 8 x i16> %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16.nxv16i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  i64);
+
+define <vscale x 16 x i16> @intrinsic_vssrl_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vv_nxv16i16_nxv16i16_nxv16i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16.nxv16i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %1,
+    i64 %2)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16.nxv16i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i16> @intrinsic_vssrl_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv16i16_nxv16i16_nxv16i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16.nxv16i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %1,
+    <vscale x 16 x i16> %2,
+    <vscale x 16 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16.nxv32i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  i64);
+
+define <vscale x 32 x i16> @intrinsic_vssrl_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vv_nxv32i16_nxv32i16_nxv32i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16.nxv32i16(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16> %1,
+    i64 %2)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16.nxv32i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  <vscale x 32 x i1>,
+  i64);
+
+define <vscale x 32 x i16> @intrinsic_vssrl_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv32i16_nxv32i16_nxv32i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16.nxv32i16(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16> %1,
+    <vscale x 32 x i16> %2,
+    <vscale x 32 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  i64);
+
+define <vscale x 1 x i32> @intrinsic_vssrl_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vv_nxv1i32_nxv1i32_nxv1i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.nxv1i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    i64 %2)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i32> @intrinsic_vssrl_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv1i32_nxv1i32_nxv1i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    <vscale x 1 x i32> %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32.nxv2i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  i64);
+
+define <vscale x 2 x i32> @intrinsic_vssrl_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vv_nxv2i32_nxv2i32_nxv2i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32.nxv2i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    i64 %2)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32.nxv2i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i32> @intrinsic_vssrl_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv2i32_nxv2i32_nxv2i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32.nxv2i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    <vscale x 2 x i32> %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  i64);
+
+define <vscale x 4 x i32> @intrinsic_vssrl_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vv_nxv4i32_nxv4i32_nxv4i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32.nxv4i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    i64 %2)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i32> @intrinsic_vssrl_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv4i32_nxv4i32_nxv4i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32.nxv4i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    <vscale x 4 x i32> %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  i64);
+
+define <vscale x 8 x i32> @intrinsic_vssrl_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vv_nxv8i32_nxv8i32_nxv8i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32.nxv8i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    i64 %2)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i32> @intrinsic_vssrl_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv8i32_nxv8i32_nxv8i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32.nxv8i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    <vscale x 8 x i32> %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32.nxv16i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  i64);
+
+define <vscale x 16 x i32> @intrinsic_vssrl_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vv_nxv16i32_nxv16i32_nxv16i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32.nxv16i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    i64 %2)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32.nxv16i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i32> @intrinsic_vssrl_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv16i32_nxv16i32_nxv16i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32.nxv16i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    <vscale x 16 x i32> %2,
+    <vscale x 16 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64.nxv1i64(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>,
+  i64);
+
+define <vscale x 1 x i64> @intrinsic_vssrl_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vv_nxv1i64_nxv1i64_nxv1i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64.nxv1i64(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64> %1,
+    i64 %2)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vssrl.mask.nxv1i64.nxv1i64(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i64> @intrinsic_vssrl_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv1i64_nxv1i64_nxv1i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i64> @llvm.riscv.vssrl.mask.nxv1i64.nxv1i64(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64> %1,
+    <vscale x 1 x i64> %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64.nxv2i64(
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>,
+  i64);
+
+define <vscale x 2 x i64> @intrinsic_vssrl_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vv_nxv2i64_nxv2i64_nxv2i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64.nxv2i64(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64> %1,
+    i64 %2)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64.nxv2i64(
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i64> @intrinsic_vssrl_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv2i64_nxv2i64_nxv2i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64.nxv2i64(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64> %1,
+    <vscale x 2 x i64> %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64.nxv4i64(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  i64);
+
+define <vscale x 4 x i64> @intrinsic_vssrl_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vv_nxv4i64_nxv4i64_nxv4i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64.nxv4i64(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64> %1,
+    i64 %2)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64.nxv4i64(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i64> @intrinsic_vssrl_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv4i64_nxv4i64_nxv4i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64.nxv4i64(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64> %1,
+    <vscale x 4 x i64> %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64.nxv8i64(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>,
+  i64);
+
+define <vscale x 8 x i64> @intrinsic_vssrl_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vv_nxv8i64_nxv8i64_nxv8i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64.nxv8i64(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i64> %1,
+    i64 %2)
+
+  ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64.nxv8i64(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i64> @intrinsic_vssrl_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv8i64_nxv8i64_nxv8i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vssrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64.nxv8i64(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i64> %1,
+    <vscale x 8 x i64> %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.i8(
+  <vscale x 1 x i8>,
+  i8,
+  i64);
+
+define <vscale x 1 x i8> @intrinsic_vssrl_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vx_nxv1i8_nxv1i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.i8(
+    <vscale x 1 x i8> %0,
+    i8 %1,
+    i64 %2)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  i8,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i8> @intrinsic_vssrl_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv1i8_nxv1i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    i8 %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8.i8(
+  <vscale x 2 x i8>,
+  i8,
+  i64);
+
+define <vscale x 2 x i8> @intrinsic_vssrl_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vx_nxv2i8_nxv2i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8.i8(
+    <vscale x 2 x i8> %0,
+    i8 %1,
+    i64 %2)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8.i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  i8,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i8> @intrinsic_vssrl_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv2i8_nxv2i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8.i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    i8 %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8.i8(
+  <vscale x 4 x i8>,
+  i8,
+  i64);
+
+define <vscale x 4 x i8> @intrinsic_vssrl_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vx_nxv4i8_nxv4i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8.i8(
+    <vscale x 4 x i8> %0,
+    i8 %1,
+    i64 %2)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8.i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  i8,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i8> @intrinsic_vssrl_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv4i8_nxv4i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8.i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    i8 %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8.i8(
+  <vscale x 8 x i8>,
+  i8,
+  i64);
+
+define <vscale x 8 x i8> @intrinsic_vssrl_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vx_nxv8i8_nxv8i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8.i8(
+    <vscale x 8 x i8> %0,
+    i8 %1,
+    i64 %2)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8.i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  i8,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i8> @intrinsic_vssrl_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv8i8_nxv8i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8.i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    i8 %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8.i8(
+  <vscale x 16 x i8>,
+  i8,
+  i64);
+
+define <vscale x 16 x i8> @intrinsic_vssrl_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vx_nxv16i8_nxv16i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8.i8(
+    <vscale x 16 x i8> %0,
+    i8 %1,
+    i64 %2)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8.i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  i8,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i8> @intrinsic_vssrl_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv16i8_nxv16i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8.i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    i8 %2,
+    <vscale x 16 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8.i8(
+  <vscale x 32 x i8>,
+  i8,
+  i64);
+
+define <vscale x 32 x i8> @intrinsic_vssrl_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vx_nxv32i8_nxv32i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8.i8(
+    <vscale x 32 x i8> %0,
+    i8 %1,
+    i64 %2)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8.i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  i8,
+  <vscale x 32 x i1>,
+  i64);
+
+define <vscale x 32 x i8> @intrinsic_vssrl_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv32i8_nxv32i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8.i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8> %1,
+    i8 %2,
+    <vscale x 32 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8.i8(
+  <vscale x 64 x i8>,
+  i8,
+  i64);
+
+define <vscale x 64 x i8> @intrinsic_vssrl_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vx_nxv64i8_nxv64i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8.i8(
+    <vscale x 64 x i8> %0,
+    i8 %1,
+    i64 %2)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8.i8(
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  i8,
+  <vscale x 64 x i1>,
+  i64);
+
+define <vscale x 64 x i8> @intrinsic_vssrl_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv64i8_nxv64i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8.i8(
+    <vscale x 64 x i8> %0,
+    <vscale x 64 x i8> %1,
+    i8 %2,
+    <vscale x 64 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16.i16(
+  <vscale x 1 x i16>,
+  i16,
+  i64);
+
+define <vscale x 1 x i16> @intrinsic_vssrl_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vx_nxv1i16_nxv1i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16.i16(
+    <vscale x 1 x i16> %0,
+    i16 %1,
+    i64 %2)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16.i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  i16,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i16> @intrinsic_vssrl_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv1i16_nxv1i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16.i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    i16 %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16.i16(
+  <vscale x 2 x i16>,
+  i16,
+  i64);
+
+define <vscale x 2 x i16> @intrinsic_vssrl_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vx_nxv2i16_nxv2i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16.i16(
+    <vscale x 2 x i16> %0,
+    i16 %1,
+    i64 %2)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16.i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  i16,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i16> @intrinsic_vssrl_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv2i16_nxv2i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16.i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %1,
+    i16 %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16.i16(
+  <vscale x 4 x i16>,
+  i16,
+  i64);
+
+define <vscale x 4 x i16> @intrinsic_vssrl_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vx_nxv4i16_nxv4i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16.i16(
+    <vscale x 4 x i16> %0,
+    i16 %1,
+    i64 %2)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16.i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  i16,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i16> @intrinsic_vssrl_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv4i16_nxv4i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16.i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %1,
+    i16 %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16.i16(
+  <vscale x 8 x i16>,
+  i16,
+  i64);
+
+define <vscale x 8 x i16> @intrinsic_vssrl_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vx_nxv8i16_nxv8i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16.i16(
+    <vscale x 8 x i16> %0,
+    i16 %1,
+    i64 %2)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16.i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  i16,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i16> @intrinsic_vssrl_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv8i16_nxv8i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16.i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %1,
+    i16 %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16.i16(
+  <vscale x 16 x i16>,
+  i16,
+  i64);
+
+define <vscale x 16 x i16> @intrinsic_vssrl_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vx_nxv16i16_nxv16i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16.i16(
+    <vscale x 16 x i16> %0,
+    i16 %1,
+    i64 %2)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16.i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  i16,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i16> @intrinsic_vssrl_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv16i16_nxv16i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16.i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %1,
+    i16 %2,
+    <vscale x 16 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16.i16(
+  <vscale x 32 x i16>,
+  i16,
+  i64);
+
+define <vscale x 32 x i16> @intrinsic_vssrl_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vx_nxv32i16_nxv32i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16.i16(
+    <vscale x 32 x i16> %0,
+    i16 %1,
+    i64 %2)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16.i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  i16,
+  <vscale x 32 x i1>,
+  i64);
+
+define <vscale x 32 x i16> @intrinsic_vssrl_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv32i16_nxv32i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16.i16(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16> %1,
+    i16 %2,
+    <vscale x 32 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.i32(
+  <vscale x 1 x i32>,
+  i32,
+  i64);
+
+define <vscale x 1 x i32> @intrinsic_vssrl_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vx_nxv1i32_nxv1i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.i32(
+    <vscale x 1 x i32> %0,
+    i32 %1,
+    i64 %2)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  i32,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i32> @intrinsic_vssrl_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv1i32_nxv1i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    i32 %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32.i32(
+  <vscale x 2 x i32>,
+  i32,
+  i64);
+
+define <vscale x 2 x i32> @intrinsic_vssrl_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vx_nxv2i32_nxv2i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32.i32(
+    <vscale x 2 x i32> %0,
+    i32 %1,
+    i64 %2)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32.i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  i32,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i32> @intrinsic_vssrl_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv2i32_nxv2i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32.i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    i32 %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32.i32(
+  <vscale x 4 x i32>,
+  i32,
+  i64);
+
+define <vscale x 4 x i32> @intrinsic_vssrl_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vx_nxv4i32_nxv4i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32.i32(
+    <vscale x 4 x i32> %0,
+    i32 %1,
+    i64 %2)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32.i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  i32,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i32> @intrinsic_vssrl_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv4i32_nxv4i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32.i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    i32 %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32.i32(
+  <vscale x 8 x i32>,
+  i32,
+  i64);
+
+define <vscale x 8 x i32> @intrinsic_vssrl_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vx_nxv8i32_nxv8i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32.i32(
+    <vscale x 8 x i32> %0,
+    i32 %1,
+    i64 %2)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32.i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  i32,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i32> @intrinsic_vssrl_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv8i32_nxv8i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32.i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    i32 %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32.i32(
+  <vscale x 16 x i32>,
+  i32,
+  i64);
+
+define <vscale x 16 x i32> @intrinsic_vssrl_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vx_nxv16i32_nxv16i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32.i32(
+    <vscale x 16 x i32> %0,
+    i32 %1,
+    i64 %2)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32.i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  i32,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i32> @intrinsic_vssrl_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv16i32_nxv16i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32.i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    i32 %2,
+    <vscale x 16 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64.i64(
+  <vscale x 1 x i64>,
+  i64,
+  i64);
+
+define <vscale x 1 x i64> @intrinsic_vssrl_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vx_nxv1i64_nxv1i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64.i64(
+    <vscale x 1 x i64> %0,
+    i64 %1,
+    i64 %2)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vssrl.mask.nxv1i64.i64(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>,
+  i64,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i64> @intrinsic_vssrl_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv1i64_nxv1i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i64> @llvm.riscv.vssrl.mask.nxv1i64.i64(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64> %1,
+    i64 %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64.i64(
+  <vscale x 2 x i64>,
+  i64,
+  i64);
+
+define <vscale x 2 x i64> @intrinsic_vssrl_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vx_nxv2i64_nxv2i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64.i64(
+    <vscale x 2 x i64> %0,
+    i64 %1,
+    i64 %2)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64.i64(
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>,
+  i64,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i64> @intrinsic_vssrl_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv2i64_nxv2i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64.i64(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64> %1,
+    i64 %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64.i64(
+  <vscale x 4 x i64>,
+  i64,
+  i64);
+
+define <vscale x 4 x i64> @intrinsic_vssrl_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vx_nxv4i64_nxv4i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64.i64(
+    <vscale x 4 x i64> %0,
+    i64 %1,
+    i64 %2)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64.i64(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  i64,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i64> @intrinsic_vssrl_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv4i64_nxv4i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64.i64(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64> %1,
+    i64 %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64.i64(
+  <vscale x 8 x i64>,
+  i64,
+  i64);
+
+define <vscale x 8 x i64> @intrinsic_vssrl_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vx_nxv8i64_nxv8i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64.i64(
+    <vscale x 8 x i64> %0,
+    i64 %1,
+    i64 %2)
+
+  ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64.i64(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>,
+  i64,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i64> @intrinsic_vssrl_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv8i64_nxv8i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vssrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64.i64(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i64> %1,
+    i64 %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i64> %a
+}
+
+define <vscale x 1 x i8> @intrinsic_vssrl_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vi_nxv1i8_nxv1i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.i8(
+    <vscale x 1 x i8> %0,
+    i8 9,
+    i64 %1)
+
+  ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 1 x i8> @intrinsic_vssrl_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv1i8_nxv1i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    i8 9,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_vssrl_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vi_nxv2i8_nxv2i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8.i8(
+    <vscale x 2 x i8> %0,
+    i8 9,
+    i64 %1)
+
+  ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_vssrl_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv2i8_nxv2i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8.i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    i8 9,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_vssrl_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vi_nxv4i8_nxv4i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8.i8(
+    <vscale x 4 x i8> %0,
+    i8 9,
+    i64 %1)
+
+  ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_vssrl_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv4i8_nxv4i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8.i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    i8 9,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vssrl_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vi_nxv8i8_nxv8i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8.i8(
+    <vscale x 8 x i8> %0,
+    i8 9,
+    i64 %1)
+
+  ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vssrl_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv8i8_nxv8i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8.i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    i8 9,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_vssrl_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vi_nxv16i8_nxv16i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8.i8(
+    <vscale x 16 x i8> %0,
+    i8 9,
+    i64 %1)
+
+  ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_vssrl_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv16i8_nxv16i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8.i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    i8 9,
+    <vscale x 16 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 32 x i8> @intrinsic_vssrl_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vi_nxv32i8_nxv32i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8.i8(
+    <vscale x 32 x i8> %0,
+    i8 9,
+    i64 %1)
+
+  ret <vscale x 32 x i8> %a
+}
+
+define <vscale x 32 x i8> @intrinsic_vssrl_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv32i8_nxv32i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8.i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8> %1,
+    i8 9,
+    <vscale x 32 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 32 x i8> %a
+}
+
+define <vscale x 64 x i8> @intrinsic_vssrl_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vi_nxv64i8_nxv64i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8.i8(
+    <vscale x 64 x i8> %0,
+    i8 9,
+    i64 %1)
+
+  ret <vscale x 64 x i8> %a
+}
+
+define <vscale x 64 x i8> @intrinsic_vssrl_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv64i8_nxv64i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8.i8(
+    <vscale x 64 x i8> %0,
+    <vscale x 64 x i8> %1,
+    i8 9,
+    <vscale x 64 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 64 x i8> %a
+}
+
+define <vscale x 1 x i16> @intrinsic_vssrl_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vi_nxv1i16_nxv1i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16.i16(
+    <vscale x 1 x i16> %0,
+    i16 9,
+    i64 %1)
+
+  ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 1 x i16> @intrinsic_vssrl_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv1i16_nxv1i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16.i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    i16 9,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 2 x i16> @intrinsic_vssrl_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vi_nxv2i16_nxv2i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16.i16(
+    <vscale x 2 x i16> %0,
+    i16 9,
+    i64 %1)
+
+  ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 2 x i16> @intrinsic_vssrl_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv2i16_nxv2i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16.i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %1,
+    i16 9,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 4 x i16> @intrinsic_vssrl_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vi_nxv4i16_nxv4i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16.i16(
+    <vscale x 4 x i16> %0,
+    i16 9,
+    i64 %1)
+
+  ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 4 x i16> @intrinsic_vssrl_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv4i16_nxv4i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16.i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %1,
+    i16 9,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 8 x i16> @intrinsic_vssrl_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vi_nxv8i16_nxv8i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16.i16(
+    <vscale x 8 x i16> %0,
+    i16 9,
+    i64 %1)
+
+  ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 8 x i16> @intrinsic_vssrl_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv8i16_nxv8i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16.i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %1,
+    i16 9,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 16 x i16> @intrinsic_vssrl_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vi_nxv16i16_nxv16i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16.i16(
+    <vscale x 16 x i16> %0,
+    i16 9,
+    i64 %1)
+
+  ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 16 x i16> @intrinsic_vssrl_mask_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv16i16_nxv16i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16.i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %1,
+    i16 9,
+    <vscale x 16 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 32 x i16> @intrinsic_vssrl_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vi_nxv32i16_nxv32i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16.i16(
+    <vscale x 32 x i16> %0,
+    i16 9,
+    i64 %1)
+
+  ret <vscale x 32 x i16> %a
+}
+
+define <vscale x 32 x i16> @intrinsic_vssrl_mask_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv32i16_nxv32i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16.i16(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16> %1,
+    i16 9,
+    <vscale x 32 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 32 x i16> %a
+}
+
+define <vscale x 1 x i32> @intrinsic_vssrl_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vi_nxv1i32_nxv1i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.i32(
+    <vscale x 1 x i32> %0,
+    i32 9,
+    i64 %1)
+
+  ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 1 x i32> @intrinsic_vssrl_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv1i32_nxv1i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    i32 9,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 2 x i32> @intrinsic_vssrl_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vi_nxv2i32_nxv2i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32.i32(
+    <vscale x 2 x i32> %0,
+    i32 9,
+    i64 %1)
+
+  ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 2 x i32> @intrinsic_vssrl_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv2i32_nxv2i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32.i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    i32 9,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 4 x i32> @intrinsic_vssrl_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vi_nxv4i32_nxv4i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32.i32(
+    <vscale x 4 x i32> %0,
+    i32 9,
+    i64 %1)
+
+  ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 4 x i32> @intrinsic_vssrl_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv4i32_nxv4i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32.i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    i32 9,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 8 x i32> @intrinsic_vssrl_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vi_nxv8i32_nxv8i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32.i32(
+    <vscale x 8 x i32> %0,
+    i32 9,
+    i64 %1)
+
+  ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 8 x i32> @intrinsic_vssrl_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv8i32_nxv8i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32.i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    i32 9,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 16 x i32> @intrinsic_vssrl_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vi_nxv16i32_nxv16i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32.i32(
+    <vscale x 16 x i32> %0,
+    i32 9,
+    i64 %1)
+
+  ret <vscale x 16 x i32> %a
+}
+
+define <vscale x 16 x i32> @intrinsic_vssrl_mask_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv16i32_nxv16i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32.i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    i32 9,
+    <vscale x 16 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 16 x i32> %a
+}
+
+define <vscale x 1 x i64> @intrinsic_vssrl_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vi_nxv1i64_nxv1i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64.i64(
+    <vscale x 1 x i64> %0,
+    i64 9,
+    i64 %1)
+
+  ret <vscale x 1 x i64> %a
+}
+
+define <vscale x 1 x i64> @intrinsic_vssrl_mask_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv1i64_nxv1i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 1 x i64> @llvm.riscv.vssrl.mask.nxv1i64.i64(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64> %1,
+    i64 9,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i64> %a
+}
+
+define <vscale x 2 x i64> @intrinsic_vssrl_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vi_nxv2i64_nxv2i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64.i64(
+    <vscale x 2 x i64> %0,
+    i64 9,
+    i64 %1)
+
+  ret <vscale x 2 x i64> %a
+}
+
+define <vscale x 2 x i64> @intrinsic_vssrl_mask_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv2i64_nxv2i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64.i64(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64> %1,
+    i64 9,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i64> %a
+}
+
+define <vscale x 4 x i64> @intrinsic_vssrl_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vi_nxv4i64_nxv4i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64.i64(
+    <vscale x 4 x i64> %0,
+    i64 9,
+    i64 %1)
+
+  ret <vscale x 4 x i64> %a
+}
+
+define <vscale x 4 x i64> @intrinsic_vssrl_mask_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv4i64_nxv4i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64.i64(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64> %1,
+    i64 9,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i64> %a
+}
+
+define <vscale x 8 x i64> @intrinsic_vssrl_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_vi_nxv8i64_nxv8i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64.i64(
+    <vscale x 8 x i64> %0,
+    i64 9,
+    i64 %1)
+
+  ret <vscale x 8 x i64> %a
+}
+
+define <vscale x 8 x i64> @intrinsic_vssrl_mask_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv8i64_nxv8i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vssrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64.i64(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i64> %1,
+    i64 9,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i64> %a
+}


More information about the llvm-branch-commits mailing list