[llvm-branch-commits] [llvm] d86a00d - [RISCV] Define vslideup/vslidedown intrinsics

ShihPo Hung via llvm-branch-commits llvm-branch-commits at lists.llvm.org
Sun Dec 20 05:14:05 PST 2020


Author: ShihPo Hung
Date: 2020-12-20T05:08:15-08:00
New Revision: d86a00d8febd0138a21f92d1420c4b62d7acb0ca

URL: https://github.com/llvm/llvm-project/commit/d86a00d8febd0138a21f92d1420c4b62d7acb0ca
DIFF: https://github.com/llvm/llvm-project/commit/d86a00d8febd0138a21f92d1420c4b62d7acb0ca.diff

LOG: [RISCV] Define vslideup/vslidedown intrinsics

Differential Revision: https://reviews.llvm.org/D93286
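
For reference, each new intrinsic takes a merge operand (tied to the
destination register), the source vector, an XLen-sized slide offset, and vl;
the masked variant additionally takes a mask. A representative sketch of the
shape, copied from the nxv1i8 RV32 tests below (other element types and LMULs
follow the same pattern):

  ; unmasked: result = vslidedown(merge, src, offset, vl)
  declare <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8(
    <vscale x 1 x i8>,   ; merge operand ($rd = $rs3)
    <vscale x 1 x i8>,   ; source vector
    i32,                 ; slide offset (XLen)
    i32);                ; vl

  ; masked: the mask is passed in v0
  declare <vscale x 1 x i8> @llvm.riscv.vslidedown.mask.nxv1i8(
    <vscale x 1 x i8>,
    <vscale x 1 x i8>,
    i32,
    <vscale x 1 x i1>,   ; mask
    i32);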

Added: 
    llvm/test/CodeGen/RISCV/rvv/vslidedown-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vslidedown-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vslideup-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vslideup-rv64.ll

Modified: 
    llvm/include/llvm/IR/IntrinsicsRISCV.td
    llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td

Removed: 
    


################################################################################
diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
index df289d9714f7..4f3b65fbd033 100644
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -283,6 +283,17 @@ let TargetPrefix = "riscv" in {
     let ExtendOperand = 1;
   }
 
+  class RISCVTernaryAAAXNoMask
+        : Intrinsic<[llvm_anyvector_ty],
+                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty,
+                     LLVMMatchType<1>],
+                    [IntrNoMem]>, RISCVVIntrinsic;
+  class RISCVTernaryAAAXMask
+        : Intrinsic<[llvm_anyvector_ty],
+                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty,
+                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<1>],
+                    [IntrNoMem]>, RISCVVIntrinsic;
+
   multiclass RISCVUSLoad {
     def "int_riscv_" # NAME : RISCVUSLoad;
     def "int_riscv_" # NAME # "_mask" : RISCVUSLoadMask;
@@ -328,6 +339,10 @@ let TargetPrefix = "riscv" in {
     def "int_riscv_" # NAME : RISCVSaturatingBinaryAAXNoMask;
     def "int_riscv_" # NAME # "_mask" : RISCVSaturatingBinaryAAXMask;
   }
+  multiclass RISCVTernaryAAAX {
+    def "int_riscv_" # NAME : RISCVTernaryAAAXNoMask;
+    def "int_riscv_" # NAME # "_mask" : RISCVTernaryAAAXMask;
+  }
 
   defm vle : RISCVUSLoad;
   defm vse : RISCVUSStore;
@@ -428,4 +443,7 @@ let TargetPrefix = "riscv" in {
   defm vfsgnj : RISCVBinaryAAX;
   defm vfsgnjn : RISCVBinaryAAX;
   defm vfsgnjx : RISCVBinaryAAX;
+
+  defm vslideup : RISCVTernaryAAAX;
+  defm vslidedown : RISCVTernaryAAAX;
 } // TargetPrefix = "riscv"

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index 52c4211a5855..ded287ff1268 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -561,6 +561,28 @@ class VPseudoBinaryCarryIn<VReg RetClass,
   let VLMul = MInfo.value;
 }
 
+class VPseudoTernaryNoMask<VReg RetClass,
+                           VReg Op1Class,
+                           DAGOperand Op2Class,
+                           string Constraint> :
+        Pseudo<(outs RetClass:$rd),
+               (ins RetClass:$rs3, Op1Class:$rs1, Op2Class:$rs2,
+                    GPR:$vl, ixlenimm:$sew),
+               []>,
+        RISCVVPseudo {
+  let mayLoad = 0;
+  let mayStore = 0;
+  let hasSideEffects = 0;
+  let usesCustomInserter = 1;
+  let Constraints = Join<[Constraint, "$rd = $rs3"], ",">.ret;
+  let Uses = [VL, VTYPE];
+  let VLIndex = 4;
+  let SEWIndex = 5;
+  let MergeOpIndex = 1;
+  let HasDummyMask = 1;
+  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
 multiclass VPseudoUSLoad {
   foreach lmul = MxList.m in {
     defvar LInfo = lmul.MX;
@@ -821,6 +843,32 @@ multiclass VPseudoBinaryV_WV_WX_WI {
   defm "" : VPseudoBinaryV_WI;
 }
 
+multiclass VPseudoTernary<VReg RetClass,
+                          VReg Op1Class,
+                          RegisterClass Op2Class,
+                          LMULInfo MInfo,
+                          string Constraint = ""> {
+  let VLMul = MInfo.value in {
+    def "_" # MInfo.MX : VPseudoTernaryNoMask<RetClass, Op1Class, Op2Class, Constraint>;
+    def "_" # MInfo.MX # "_MASK" : VPseudoBinaryMask<RetClass, Op1Class, Op2Class, Constraint>;
+  }
+}
+
+multiclass VPseudoTernaryV_VX<string Constraint = ""> {
+  foreach m = MxList.m in
+    defm _VX : VPseudoTernary<m.vrclass, m.vrclass, GPR, m, Constraint>;
+}
+
+multiclass VPseudoTernaryV_VI<Operand ImmType = simm5, string Constraint = ""> {
+  foreach m = MxList.m in
+    defm _VI : VPseudoTernary<m.vrclass, m.vrclass, ImmType, m, Constraint>;
+}
+
+multiclass VPseudoTernaryV_VX_VI<Operand ImmType = simm5, string Constraint = ""> {
+  defm "" : VPseudoTernaryV_VX<Constraint>;
+  defm "" : VPseudoTernaryV_VI<ImmType, Constraint>;
+}
+
 //===----------------------------------------------------------------------===//
 // Helpers to define the SDNode patterns.
 //===----------------------------------------------------------------------===//
@@ -919,6 +967,54 @@ class VPatBinaryMask<string intrinsic_name,
                    ToFPR32<op2_type, op2_kind, "rs2">.ret,
                    (mask_type V0), (NoX0 GPR:$vl), sew)>;
 
+class VPatTernaryNoMask<string intrinsic,
+                        string inst,
+                        string kind,
+                        ValueType result_type,
+                        ValueType op1_type,
+                        ValueType op2_type,
+                        ValueType mask_type,
+                        int sew,
+                        LMULInfo vlmul,
+                        VReg result_reg_class,
+                        RegisterClass op1_reg_class,
+                        DAGOperand op2_kind> :
+  Pat<(result_type (!cast<Intrinsic>(intrinsic)
+                    (result_type result_reg_class:$rs3),
+                    (op1_type op1_reg_class:$rs1),
+                    (op2_type op2_kind:$rs2),
+                    (XLenVT GPR:$vl))),
+                   (!cast<Instruction>(inst#_#kind#"_"# vlmul.MX)
+                    result_reg_class:$rs3,
+                    ToFPR32<op1_type, op1_reg_class, "rs1">.ret,
+                    op2_kind:$rs2,
+                    (NoX0 GPR:$vl), sew)>;
+
+class VPatTernaryMask<string intrinsic,
+                      string inst,
+                      string kind,
+                      ValueType result_type,
+                      ValueType op1_type,
+                      ValueType op2_type,
+                      ValueType mask_type,
+                      int sew,
+                      LMULInfo vlmul,
+                      VReg result_reg_class,
+                      RegisterClass op1_reg_class,
+                      DAGOperand op2_kind> :
+  Pat<(result_type (!cast<Intrinsic>(intrinsic#"_mask")
+                    (result_type result_reg_class:$rs3),
+                    (op1_type op1_reg_class:$rs1),
+                    (op2_type op2_kind:$rs2),
+                    (mask_type V0),
+                    (XLenVT GPR:$vl))),
+                   (!cast<Instruction>(inst#_#kind#"_"# vlmul.MX # "_MASK")
+                    result_reg_class:$rs3,
+                    ToFPR32<op1_type, op1_reg_class, "rs1">.ret,
+                    op2_kind:$rs2,
+                    (mask_type V0),
+                    (NoX0 GPR:$vl), sew)>;
+
 multiclass VPatUSLoad<string intrinsic,
                       string inst,
                       LLVMType type,
@@ -1359,6 +1455,50 @@ multiclass VPatBinaryM_V_X<string intrinsic, string instruction>
   defm "" : VPatBinaryV_X<intrinsic, instruction>;
 }
 
+multiclass VPatTernary<string intrinsic,
+                       string inst,
+                       string kind,
+                       ValueType result_type,
+                       ValueType op1_type,
+                       ValueType op2_type,
+                       ValueType mask_type,
+                       int sew,
+                       LMULInfo vlmul,
+                       VReg result_reg_class,
+                       RegisterClass op1_reg_class,
+                       DAGOperand op2_kind> {
+  def : VPatTernaryNoMask<intrinsic, inst, kind, result_type, op1_type, op2_type,
+                    mask_type, sew, vlmul, result_reg_class, op1_reg_class,
+                    op2_kind>;
+  def : VPatTernaryMask<intrinsic, inst, kind, result_type, op1_type, op2_type,
+                        mask_type, sew, vlmul, result_reg_class, op1_reg_class,
+                        op2_kind>;
+}
+
+multiclass VPatTernaryV_VX<string intrinsic, string instruction,
+                           list<VTypeInfo> vtilist> {
+  foreach vti = vtilist in
+    defm : VPatTernary<intrinsic, instruction, "VX",
+                       vti.Vector, vti.Vector, XLenVT, vti.Mask,
+                       vti.SEW, vti.LMul, vti.RegClass,
+                       vti.RegClass, GPR>;
+}
+
+multiclass VPatTernaryV_VI<string intrinsic, string instruction,
+                           list<VTypeInfo> vtilist, Operand Imm_type> {
+  foreach vti = vtilist in
+    defm : VPatTernary<intrinsic, instruction, "VI",
+                      vti.Vector, vti.Vector, XLenVT, vti.Mask,
+                      vti.SEW, vti.LMul, vti.RegClass,
+                      vti.RegClass, Imm_type>;
+}
+
+multiclass VPatTernaryV_VX_VI<string intrinsic, string instruction,
+                              list<VTypeInfo> vtilist, Operand Imm_type = simm5> {
+  defm "" : VPatTernaryV_VX<intrinsic, instruction, vtilist>;
+  defm "" : VPatTernaryV_VI<intrinsic, instruction, vtilist, Imm_type>;
+}
+
 //===----------------------------------------------------------------------===//
 // Pseudo instructions and patterns.
 //===----------------------------------------------------------------------===//
@@ -1606,6 +1746,12 @@ let mayLoad = 0, mayStore = 0, hasSideEffects = 0, usesCustomInserter = 1,
 }
 } // Predicates = [HasStdExtV, HasStdExtF]
 
+//===----------------------------------------------------------------------===//
+// 17.3. Vector Slide Instructions
+//===----------------------------------------------------------------------===//
+defm PseudoVSLIDEUP    : VPseudoTernaryV_VX_VI<uimm5, "@earlyclobber $rd">;
+defm PseudoVSLIDEDOWN  : VPseudoTernaryV_VX_VI<uimm5>;
+
 //===----------------------------------------------------------------------===//
 // Patterns.
 //===----------------------------------------------------------------------===//
@@ -1848,6 +1994,10 @@ defm "" : VPatBinaryV_VV_VX<"int_riscv_vfsgnjx", "PseudoVFSGNJX", AllFloatVector
 
 } // Predicates = [HasStdExtV, HasStdExtF]
 
+//===----------------------------------------------------------------------===//
+// 17. Vector Permutation Instructions
+//===----------------------------------------------------------------------===//
+
 //===----------------------------------------------------------------------===//
 // 17.1. Integer Scalar Move Instructions
 //===----------------------------------------------------------------------===//
@@ -1893,3 +2043,16 @@ foreach fvti = AllFloatVectors in {
              (NoX0 GPR:$vl), fvti.SEW)>;
 }
 } // Predicates = [HasStdExtV, HasStdExtF]
+
+//===----------------------------------------------------------------------===//
+// 17.3. Vector Slide Instructions
+//===----------------------------------------------------------------------===//
+let Predicates = [HasStdExtV] in {
+  defm "" : VPatTernaryV_VX_VI<"int_riscv_vslideup", "PseudoVSLIDEUP", AllIntegerVectors, uimm5>;
+  defm "" : VPatTernaryV_VX_VI<"int_riscv_vslidedown", "PseudoVSLIDEDOWN", AllIntegerVectors, uimm5>;
+} // Predicates = [HasStdExtV]
+
+let Predicates = [HasStdExtV, HasStdExtF] in {
+  defm "" : VPatTernaryV_VX_VI<"int_riscv_vslideup", "PseudoVSLIDEUP", AllFloatVectors, uimm5>;
+  defm "" : VPatTernaryV_VX_VI<"int_riscv_vslidedown", "PseudoVSLIDEDOWN", AllFloatVectors, uimm5>;
+} // Predicates = [HasStdExtV, HasStdExtF]

diff --git a/llvm/test/CodeGen/RISCV/rvv/vslidedown-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vslidedown-rv32.ll
new file mode 100644
index 000000000000..dd7acdf89294
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vslidedown-rv32.ll
@@ -0,0 +1,1705 @@
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  i32,
+  i32);
+
+define <vscale x 1 x i8> @intrinsic_vslidedown_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1i8_nxv1i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    i32 %2,
+    i32 %3)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vslidedown.mask.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  i32,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i8> @intrinsic_vslidedown_mask_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1i8_nxv1i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 1 x i8> @llvm.riscv.vslidedown.mask.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    i32 %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 1 x i8> @intrinsic_vslidedown_vi_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1i8_nxv1i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    i32 9,
+    i32 %2)
+
+  ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 1 x i8> @intrinsic_vslidedown_mask_vi_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv1i8_nxv1i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 1 x i8> @llvm.riscv.vslidedown.mask.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    i32 9,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vslidedown.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  i32,
+  i32);
+
+define <vscale x 2 x i8> @intrinsic_vslidedown_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2i8_nxv2i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 2 x i8> @llvm.riscv.vslidedown.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    i32 %2,
+    i32 %3)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vslidedown.mask.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  i32,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i8> @intrinsic_vslidedown_mask_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2i8_nxv2i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 2 x i8> @llvm.riscv.vslidedown.mask.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    i32 %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_vslidedown_vi_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2i8_nxv2i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 2 x i8> @llvm.riscv.vslidedown.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    i32 9,
+    i32 %2)
+
+  ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_vslidedown_mask_vi_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv2i8_nxv2i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 2 x i8> @llvm.riscv.vslidedown.mask.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    i32 9,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vslidedown.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  i32,
+  i32);
+
+define <vscale x 4 x i8> @intrinsic_vslidedown_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4i8_nxv4i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 4 x i8> @llvm.riscv.vslidedown.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    i32 %2,
+    i32 %3)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vslidedown.mask.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  i32,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i8> @intrinsic_vslidedown_mask_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4i8_nxv4i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 4 x i8> @llvm.riscv.vslidedown.mask.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    i32 %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_vslidedown_vi_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4i8_nxv4i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 4 x i8> @llvm.riscv.vslidedown.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    i32 9,
+    i32 %2)
+
+  ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_vslidedown_mask_vi_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv4i8_nxv4i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 4 x i8> @llvm.riscv.vslidedown.mask.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    i32 9,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vslidedown.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  i32,
+  i32);
+
+define <vscale x 8 x i8> @intrinsic_vslidedown_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8i8_nxv8i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 8 x i8> @llvm.riscv.vslidedown.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    i32 %2,
+    i32 %3)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vslidedown.mask.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  i32,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i8> @intrinsic_vslidedown_mask_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv8i8_nxv8i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 8 x i8> @llvm.riscv.vslidedown.mask.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    i32 %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vslidedown_vi_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8i8_nxv8i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 8 x i8> @llvm.riscv.vslidedown.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    i32 9,
+    i32 %2)
+
+  ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vslidedown_mask_vi_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv8i8_nxv8i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 8 x i8> @llvm.riscv.vslidedown.mask.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    i32 9,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vslidedown.nxv16i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  i32,
+  i32);
+
+define <vscale x 16 x i8> @intrinsic_vslidedown_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vx_nxv16i8_nxv16i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 16 x i8> @llvm.riscv.vslidedown.nxv16i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    i32 %2,
+    i32 %3)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vslidedown.mask.nxv16i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  i32,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i8> @intrinsic_vslidedown_mask_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv16i8_nxv16i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 16 x i8> @llvm.riscv.vslidedown.mask.nxv16i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    i32 %2,
+    <vscale x 16 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_vslidedown_vi_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vi_nxv16i8_nxv16i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 16 x i8> @llvm.riscv.vslidedown.nxv16i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    i32 9,
+    i32 %2)
+
+  ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_vslidedown_mask_vi_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv16i8_nxv16i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 16 x i8> @llvm.riscv.vslidedown.mask.nxv16i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    i32 9,
+    <vscale x 16 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vslidedown.nxv32i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  i32,
+  i32);
+
+define <vscale x 32 x i8> @intrinsic_vslidedown_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vx_nxv32i8_nxv32i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 32 x i8> @llvm.riscv.vslidedown.nxv32i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8> %1,
+    i32 %2,
+    i32 %3)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vslidedown.mask.nxv32i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  i32,
+  <vscale x 32 x i1>,
+  i32);
+
+define <vscale x 32 x i8> @intrinsic_vslidedown_mask_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv32i8_nxv32i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 32 x i8> @llvm.riscv.vslidedown.mask.nxv32i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8> %1,
+    i32 %2,
+    <vscale x 32 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 32 x i8> %a
+}
+
+define <vscale x 32 x i8> @intrinsic_vslidedown_vi_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vi_nxv32i8_nxv32i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 32 x i8> @llvm.riscv.vslidedown.nxv32i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8> %1,
+    i32 9,
+    i32 %2)
+
+  ret <vscale x 32 x i8> %a
+}
+
+define <vscale x 32 x i8> @intrinsic_vslidedown_mask_vi_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv32i8_nxv32i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 32 x i8> @llvm.riscv.vslidedown.mask.nxv32i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8> %1,
+    i32 9,
+    <vscale x 32 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vslidedown.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  i32,
+  i32);
+
+define <vscale x 1 x i16> @intrinsic_vslidedown_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1i16_nxv1i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 1 x i16> @llvm.riscv.vslidedown.nxv1i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    i32 %2,
+    i32 %3)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vslidedown.mask.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  i32,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i16> @intrinsic_vslidedown_mask_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1i16_nxv1i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 1 x i16> @llvm.riscv.vslidedown.mask.nxv1i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    i32 %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 1 x i16> @intrinsic_vslidedown_vi_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1i16_nxv1i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 1 x i16> @llvm.riscv.vslidedown.nxv1i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    i32 9,
+    i32 %2)
+
+  ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 1 x i16> @intrinsic_vslidedown_mask_vi_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv1i16_nxv1i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 1 x i16> @llvm.riscv.vslidedown.mask.nxv1i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    i32 9,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vslidedown.nxv2i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  i32,
+  i32);
+
+define <vscale x 2 x i16> @intrinsic_vslidedown_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2i16_nxv2i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 2 x i16> @llvm.riscv.vslidedown.nxv2i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %1,
+    i32 %2,
+    i32 %3)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vslidedown.mask.nxv2i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  i32,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i16> @intrinsic_vslidedown_mask_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2i16_nxv2i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 2 x i16> @llvm.riscv.vslidedown.mask.nxv2i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %1,
+    i32 %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 2 x i16> @intrinsic_vslidedown_vi_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2i16_nxv2i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 2 x i16> @llvm.riscv.vslidedown.nxv2i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %1,
+    i32 9,
+    i32 %2)
+
+  ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 2 x i16> @intrinsic_vslidedown_mask_vi_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv2i16_nxv2i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 2 x i16> @llvm.riscv.vslidedown.mask.nxv2i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %1,
+    i32 9,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vslidedown.nxv4i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  i32,
+  i32);
+
+define <vscale x 4 x i16> @intrinsic_vslidedown_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4i16_nxv4i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 4 x i16> @llvm.riscv.vslidedown.nxv4i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %1,
+    i32 %2,
+    i32 %3)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vslidedown.mask.nxv4i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  i32,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i16> @intrinsic_vslidedown_mask_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4i16_nxv4i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 4 x i16> @llvm.riscv.vslidedown.mask.nxv4i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %1,
+    i32 %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 4 x i16> @intrinsic_vslidedown_vi_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4i16_nxv4i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 4 x i16> @llvm.riscv.vslidedown.nxv4i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %1,
+    i32 9,
+    i32 %2)
+
+  ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 4 x i16> @intrinsic_vslidedown_mask_vi_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv4i16_nxv4i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 4 x i16> @llvm.riscv.vslidedown.mask.nxv4i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %1,
+    i32 9,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vslidedown.nxv8i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  i32,
+  i32);
+
+define <vscale x 8 x i16> @intrinsic_vslidedown_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8i16_nxv8i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 8 x i16> @llvm.riscv.vslidedown.nxv8i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %1,
+    i32 %2,
+    i32 %3)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vslidedown.mask.nxv8i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  i32,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i16> @intrinsic_vslidedown_mask_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv8i16_nxv8i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 8 x i16> @llvm.riscv.vslidedown.mask.nxv8i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %1,
+    i32 %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 8 x i16> @intrinsic_vslidedown_vi_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8i16_nxv8i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 8 x i16> @llvm.riscv.vslidedown.nxv8i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %1,
+    i32 9,
+    i32 %2)
+
+  ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 8 x i16> @intrinsic_vslidedown_mask_vi_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv8i16_nxv8i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 8 x i16> @llvm.riscv.vslidedown.mask.nxv8i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %1,
+    i32 9,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vslidedown.nxv16i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  i32,
+  i32);
+
+define <vscale x 16 x i16> @intrinsic_vslidedown_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vx_nxv16i16_nxv16i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 16 x i16> @llvm.riscv.vslidedown.nxv16i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %1,
+    i32 %2,
+    i32 %3)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vslidedown.mask.nxv16i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  i32,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i16> @intrinsic_vslidedown_mask_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv16i16_nxv16i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 16 x i16> @llvm.riscv.vslidedown.mask.nxv16i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %1,
+    i32 %2,
+    <vscale x 16 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 16 x i16> @intrinsic_vslidedown_vi_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vi_nxv16i16_nxv16i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 16 x i16> @llvm.riscv.vslidedown.nxv16i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %1,
+    i32 9,
+    i32 %2)
+
+  ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 16 x i16> @intrinsic_vslidedown_mask_vi_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv16i16_nxv16i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 16 x i16> @llvm.riscv.vslidedown.mask.nxv16i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %1,
+    i32 9,
+    <vscale x 16 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vslidedown.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  i32,
+  i32);
+
+define <vscale x 1 x i32> @intrinsic_vslidedown_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1i32_nxv1i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 1 x i32> @llvm.riscv.vslidedown.nxv1i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    i32 %2,
+    i32 %3)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vslidedown.mask.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  i32,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i32> @intrinsic_vslidedown_mask_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1i32_nxv1i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 1 x i32> @llvm.riscv.vslidedown.mask.nxv1i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    i32 %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 1 x i32> @intrinsic_vslidedown_vi_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1i32_nxv1i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 1 x i32> @llvm.riscv.vslidedown.nxv1i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    i32 9,
+    i32 %2)
+
+  ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 1 x i32> @intrinsic_vslidedown_mask_vi_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv1i32_nxv1i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 1 x i32> @llvm.riscv.vslidedown.mask.nxv1i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    i32 9,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vslidedown.nxv2i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  i32,
+  i32);
+
+define <vscale x 2 x i32> @intrinsic_vslidedown_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2i32_nxv2i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 2 x i32> @llvm.riscv.vslidedown.nxv2i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    i32 %2,
+    i32 %3)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vslidedown.mask.nxv2i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  i32,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i32> @intrinsic_vslidedown_mask_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2i32_nxv2i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 2 x i32> @llvm.riscv.vslidedown.mask.nxv2i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    i32 %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 2 x i32> @intrinsic_vslidedown_vi_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2i32_nxv2i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 2 x i32> @llvm.riscv.vslidedown.nxv2i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    i32 9,
+    i32 %2)
+
+  ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 2 x i32> @intrinsic_vslidedown_mask_vi_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv2i32_nxv2i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 2 x i32> @llvm.riscv.vslidedown.mask.nxv2i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    i32 9,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vslidedown.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  i32,
+  i32);
+
+define <vscale x 4 x i32> @intrinsic_vslidedown_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4i32_nxv4i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 4 x i32> @llvm.riscv.vslidedown.nxv4i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    i32 %2,
+    i32 %3)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vslidedown.mask.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  i32,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i32> @intrinsic_vslidedown_mask_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4i32_nxv4i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 4 x i32> @llvm.riscv.vslidedown.mask.nxv4i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    i32 %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 4 x i32> @intrinsic_vslidedown_vi_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4i32_nxv4i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 4 x i32> @llvm.riscv.vslidedown.nxv4i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    i32 9,
+    i32 %2)
+
+  ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 4 x i32> @intrinsic_vslidedown_mask_vi_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv4i32_nxv4i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 4 x i32> @llvm.riscv.vslidedown.mask.nxv4i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    i32 9,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vslidedown.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  i32,
+  i32);
+
+define <vscale x 8 x i32> @intrinsic_vslidedown_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8i32_nxv8i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 8 x i32> @llvm.riscv.vslidedown.nxv8i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    i32 %2,
+    i32 %3)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vslidedown.mask.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  i32,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i32> @intrinsic_vslidedown_mask_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv8i32_nxv8i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 8 x i32> @llvm.riscv.vslidedown.mask.nxv8i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    i32 %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 8 x i32> @intrinsic_vslidedown_vi_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8i32_nxv8i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 8 x i32> @llvm.riscv.vslidedown.nxv8i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    i32 9,
+    i32 %2)
+
+  ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 8 x i32> @intrinsic_vslidedown_mask_vi_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv8i32_nxv8i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 8 x i32> @llvm.riscv.vslidedown.mask.nxv8i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    i32 9,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vslidedown.nxv1f16(
+  <vscale x 1 x half>,
+  <vscale x 1 x half>,
+  i32,
+  i32);
+
+define <vscale x 1 x half> @intrinsic_vslidedown_vx_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i32 %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1f16_nxv1f16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 1 x half> @llvm.riscv.vslidedown.nxv1f16(
+    <vscale x 1 x half> %0,
+    <vscale x 1 x half> %1,
+    i32 %2,
+    i32 %3)
+
+  ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vslidedown.mask.nxv1f16(
+  <vscale x 1 x half>,
+  <vscale x 1 x half>,
+  i32,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x half> @intrinsic_vslidedown_mask_vx_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1f16_nxv1f16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 1 x half> @llvm.riscv.vslidedown.mask.nxv1f16(
+    <vscale x 1 x half> %0,
+    <vscale x 1 x half> %1,
+    i32 %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x half> %a
+}
+
+define <vscale x 1 x half> @intrinsic_vslidedown_vi_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1f16_nxv1f16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 1 x half> @llvm.riscv.vslidedown.nxv1f16(
+    <vscale x 1 x half> %0,
+    <vscale x 1 x half> %1,
+    i32 9,
+    i32 %2)
+
+  ret <vscale x 1 x half> %a
+}
+
+define <vscale x 1 x half> @intrinsic_vslidedown_mask_vi_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv1f16_nxv1f16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 1 x half> @llvm.riscv.vslidedown.mask.nxv1f16(
+    <vscale x 1 x half> %0,
+    <vscale x 1 x half> %1,
+    i32 9,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vslidedown.nxv2f16(
+  <vscale x 2 x half>,
+  <vscale x 2 x half>,
+  i32,
+  i32);
+
+define <vscale x 2 x half> @intrinsic_vslidedown_vx_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i32 %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2f16_nxv2f16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 2 x half> @llvm.riscv.vslidedown.nxv2f16(
+    <vscale x 2 x half> %0,
+    <vscale x 2 x half> %1,
+    i32 %2,
+    i32 %3)
+
+  ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vslidedown.mask.nxv2f16(
+  <vscale x 2 x half>,
+  <vscale x 2 x half>,
+  i32,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x half> @intrinsic_vslidedown_mask_vx_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2f16_nxv2f16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 2 x half> @llvm.riscv.vslidedown.mask.nxv2f16(
+    <vscale x 2 x half> %0,
+    <vscale x 2 x half> %1,
+    i32 %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x half> %a
+}
+
+define <vscale x 2 x half> @intrinsic_vslidedown_vi_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2f16_nxv2f16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 2 x half> @llvm.riscv.vslidedown.nxv2f16(
+    <vscale x 2 x half> %0,
+    <vscale x 2 x half> %1,
+    i32 9,
+    i32 %2)
+
+  ret <vscale x 2 x half> %a
+}
+
+define <vscale x 2 x half> @intrinsic_vslidedown_mask_vi_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv2f16_nxv2f16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 2 x half> @llvm.riscv.vslidedown.mask.nxv2f16(
+    <vscale x 2 x half> %0,
+    <vscale x 2 x half> %1,
+    i32 9,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vslidedown.nxv4f16(
+  <vscale x 4 x half>,
+  <vscale x 4 x half>,
+  i32,
+  i32);
+
+define <vscale x 4 x half> @intrinsic_vslidedown_vx_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i32 %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4f16_nxv4f16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 4 x half> @llvm.riscv.vslidedown.nxv4f16(
+    <vscale x 4 x half> %0,
+    <vscale x 4 x half> %1,
+    i32 %2,
+    i32 %3)
+
+  ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vslidedown.mask.nxv4f16(
+  <vscale x 4 x half>,
+  <vscale x 4 x half>,
+  i32,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x half> @intrinsic_vslidedown_mask_vx_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4f16_nxv4f16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 4 x half> @llvm.riscv.vslidedown.mask.nxv4f16(
+    <vscale x 4 x half> %0,
+    <vscale x 4 x half> %1,
+    i32 %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x half> %a
+}
+
+define <vscale x 4 x half> @intrinsic_vslidedown_vi_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4f16_nxv4f16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 4 x half> @llvm.riscv.vslidedown.nxv4f16(
+    <vscale x 4 x half> %0,
+    <vscale x 4 x half> %1,
+    i32 9,
+    i32 %2)
+
+  ret <vscale x 4 x half> %a
+}
+
+define <vscale x 4 x half> @intrinsic_vslidedown_mask_vi_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv4f16_nxv4f16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 4 x half> @llvm.riscv.vslidedown.mask.nxv4f16(
+    <vscale x 4 x half> %0,
+    <vscale x 4 x half> %1,
+    i32 9,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vslidedown.nxv8f16(
+  <vscale x 8 x half>,
+  <vscale x 8 x half>,
+  i32,
+  i32);
+
+define <vscale x 8 x half> @intrinsic_vslidedown_vx_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i32 %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8f16_nxv8f16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 8 x half> @llvm.riscv.vslidedown.nxv8f16(
+    <vscale x 8 x half> %0,
+    <vscale x 8 x half> %1,
+    i32 %2,
+    i32 %3)
+
+  ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vslidedown.mask.nxv8f16(
+  <vscale x 8 x half>,
+  <vscale x 8 x half>,
+  i32,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x half> @intrinsic_vslidedown_mask_vx_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv8f16_nxv8f16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 8 x half> @llvm.riscv.vslidedown.mask.nxv8f16(
+    <vscale x 8 x half> %0,
+    <vscale x 8 x half> %1,
+    i32 %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x half> %a
+}
+
+define <vscale x 8 x half> @intrinsic_vslidedown_vi_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8f16_nxv8f16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 8 x half> @llvm.riscv.vslidedown.nxv8f16(
+    <vscale x 8 x half> %0,
+    <vscale x 8 x half> %1,
+    i32 9,
+    i32 %2)
+
+  ret <vscale x 8 x half> %a
+}
+
+define <vscale x 8 x half> @intrinsic_vslidedown_mask_vi_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv8f16_nxv8f16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 8 x half> @llvm.riscv.vslidedown.mask.nxv8f16(
+    <vscale x 8 x half> %0,
+    <vscale x 8 x half> %1,
+    i32 9,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vslidedown.nxv16f16(
+  <vscale x 16 x half>,
+  <vscale x 16 x half>,
+  i32,
+  i32);
+
+define <vscale x 16 x half> @intrinsic_vslidedown_vx_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i32 %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vx_nxv16f16_nxv16f16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 16 x half> @llvm.riscv.vslidedown.nxv16f16(
+    <vscale x 16 x half> %0,
+    <vscale x 16 x half> %1,
+    i32 %2,
+    i32 %3)
+
+  ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vslidedown.mask.nxv16f16(
+  <vscale x 16 x half>,
+  <vscale x 16 x half>,
+  i32,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x half> @intrinsic_vslidedown_mask_vx_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv16f16_nxv16f16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 16 x half> @llvm.riscv.vslidedown.mask.nxv16f16(
+    <vscale x 16 x half> %0,
+    <vscale x 16 x half> %1,
+    i32 %2,
+    <vscale x 16 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 16 x half> %a
+}
+
+define <vscale x 16 x half> @intrinsic_vslidedown_vi_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vi_nxv16f16_nxv16f16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 16 x half> @llvm.riscv.vslidedown.nxv16f16(
+    <vscale x 16 x half> %0,
+    <vscale x 16 x half> %1,
+    i32 9,
+    i32 %2)
+
+  ret <vscale x 16 x half> %a
+}
+
+define <vscale x 16 x half> @intrinsic_vslidedown_mask_vi_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv16f16_nxv16f16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 16 x half> @llvm.riscv.vslidedown.mask.nxv16f16(
+    <vscale x 16 x half> %0,
+    <vscale x 16 x half> %1,
+    i32 9,
+    <vscale x 16 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vslidedown.nxv1f32(
+  <vscale x 1 x float>,
+  <vscale x 1 x float>,
+  i32,
+  i32);
+
+define <vscale x 1 x float> @intrinsic_vslidedown_vx_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i32 %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1f32_nxv1f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 1 x float> @llvm.riscv.vslidedown.nxv1f32(
+    <vscale x 1 x float> %0,
+    <vscale x 1 x float> %1,
+    i32 %2,
+    i32 %3)
+
+  ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vslidedown.mask.nxv1f32(
+  <vscale x 1 x float>,
+  <vscale x 1 x float>,
+  i32,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x float> @intrinsic_vslidedown_mask_vx_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1f32_nxv1f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 1 x float> @llvm.riscv.vslidedown.mask.nxv1f32(
+    <vscale x 1 x float> %0,
+    <vscale x 1 x float> %1,
+    i32 %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x float> %a
+}
+
+define <vscale x 1 x float> @intrinsic_vslidedown_vi_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1f32_nxv1f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 1 x float> @llvm.riscv.vslidedown.nxv1f32(
+    <vscale x 1 x float> %0,
+    <vscale x 1 x float> %1,
+    i32 9,
+    i32 %2)
+
+  ret <vscale x 1 x float> %a
+}
+
+define <vscale x 1 x float> @intrinsic_vslidedown_mask_vi_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv1f32_nxv1f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 1 x float> @llvm.riscv.vslidedown.mask.nxv1f32(
+    <vscale x 1 x float> %0,
+    <vscale x 1 x float> %1,
+    i32 9,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vslidedown.nxv2f32(
+  <vscale x 2 x float>,
+  <vscale x 2 x float>,
+  i32,
+  i32);
+
+define <vscale x 2 x float> @intrinsic_vslidedown_vx_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i32 %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2f32_nxv2f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 2 x float> @llvm.riscv.vslidedown.nxv2f32(
+    <vscale x 2 x float> %0,
+    <vscale x 2 x float> %1,
+    i32 %2,
+    i32 %3)
+
+  ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vslidedown.mask.nxv2f32(
+  <vscale x 2 x float>,
+  <vscale x 2 x float>,
+  i32,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x float> @intrinsic_vslidedown_mask_vx_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2f32_nxv2f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 2 x float> @llvm.riscv.vslidedown.mask.nxv2f32(
+    <vscale x 2 x float> %0,
+    <vscale x 2 x float> %1,
+    i32 %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x float> %a
+}
+
+define <vscale x 2 x float> @intrinsic_vslidedown_vi_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2f32_nxv2f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 2 x float> @llvm.riscv.vslidedown.nxv2f32(
+    <vscale x 2 x float> %0,
+    <vscale x 2 x float> %1,
+    i32 9,
+    i32 %2)
+
+  ret <vscale x 2 x float> %a
+}
+
+define <vscale x 2 x float> @intrinsic_vslidedown_mask_vi_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv2f32_nxv2f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 2 x float> @llvm.riscv.vslidedown.mask.nxv2f32(
+    <vscale x 2 x float> %0,
+    <vscale x 2 x float> %1,
+    i32 9,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vslidedown.nxv4f32(
+  <vscale x 4 x float>,
+  <vscale x 4 x float>,
+  i32,
+  i32);
+
+define <vscale x 4 x float> @intrinsic_vslidedown_vx_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i32 %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4f32_nxv4f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 4 x float> @llvm.riscv.vslidedown.nxv4f32(
+    <vscale x 4 x float> %0,
+    <vscale x 4 x float> %1,
+    i32 %2,
+    i32 %3)
+
+  ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vslidedown.mask.nxv4f32(
+  <vscale x 4 x float>,
+  <vscale x 4 x float>,
+  i32,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x float> @intrinsic_vslidedown_mask_vx_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4f32_nxv4f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 4 x float> @llvm.riscv.vslidedown.mask.nxv4f32(
+    <vscale x 4 x float> %0,
+    <vscale x 4 x float> %1,
+    i32 %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x float> %a
+}
+
+define <vscale x 4 x float> @intrinsic_vslidedown_vi_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4f32_nxv4f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 4 x float> @llvm.riscv.vslidedown.nxv4f32(
+    <vscale x 4 x float> %0,
+    <vscale x 4 x float> %1,
+    i32 9,
+    i32 %2)
+
+  ret <vscale x 4 x float> %a
+}
+
+define <vscale x 4 x float> @intrinsic_vslidedown_mask_vi_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv4f32_nxv4f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 4 x float> @llvm.riscv.vslidedown.mask.nxv4f32(
+    <vscale x 4 x float> %0,
+    <vscale x 4 x float> %1,
+    i32 9,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vslidedown.nxv8f32(
+  <vscale x 8 x float>,
+  <vscale x 8 x float>,
+  i32,
+  i32);
+
+define <vscale x 8 x float> @intrinsic_vslidedown_vx_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i32 %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8f32_nxv8f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 8 x float> @llvm.riscv.vslidedown.nxv8f32(
+    <vscale x 8 x float> %0,
+    <vscale x 8 x float> %1,
+    i32 %2,
+    i32 %3)
+
+  ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vslidedown.mask.nxv8f32(
+  <vscale x 8 x float>,
+  <vscale x 8 x float>,
+  i32,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x float> @intrinsic_vslidedown_mask_vx_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv8f32_nxv8f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 8 x float> @llvm.riscv.vslidedown.mask.nxv8f32(
+    <vscale x 8 x float> %0,
+    <vscale x 8 x float> %1,
+    i32 %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x float> %a
+}
+
+define <vscale x 8 x float> @intrinsic_vslidedown_vi_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8f32_nxv8f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 8 x float> @llvm.riscv.vslidedown.nxv8f32(
+    <vscale x 8 x float> %0,
+    <vscale x 8 x float> %1,
+    i32 9,
+    i32 %2)
+
+  ret <vscale x 8 x float> %a
+}
+
+define <vscale x 8 x float> @intrinsic_vslidedown_mask_vi_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv8f32_nxv8f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 8 x float> @llvm.riscv.vslidedown.mask.nxv8f32(
+    <vscale x 8 x float> %0,
+    <vscale x 8 x float> %1,
+    i32 9,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 8 x float> %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vslidedown-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vslidedown-rv64.ll
new file mode 100644
index 000000000000..154c67dc52f1
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vslidedown-rv64.ll
@@ -0,0 +1,2131 @@
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  i64,
+  i64);
+
+define <vscale x 1 x i8> @intrinsic_vslidedown_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1i8_nxv1i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    i64 %2,
+    i64 %3)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vslidedown.mask.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  i64,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i8> @intrinsic_vslidedown_mask_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1i8_nxv1i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 1 x i8> @llvm.riscv.vslidedown.mask.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    i64 %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 1 x i8> @intrinsic_vslidedown_vi_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1i8_nxv1i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    i64 9,
+    i64 %2)
+
+  ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 1 x i8> @intrinsic_vslidedown_mask_vi_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv1i8_nxv1i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 1 x i8> @llvm.riscv.vslidedown.mask.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    i64 9,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vslidedown.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  i64,
+  i64);
+
+define <vscale x 2 x i8> @intrinsic_vslidedown_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2i8_nxv2i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 2 x i8> @llvm.riscv.vslidedown.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    i64 %2,
+    i64 %3)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vslidedown.mask.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  i64,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i8> @intrinsic_vslidedown_mask_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2i8_nxv2i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 2 x i8> @llvm.riscv.vslidedown.mask.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    i64 %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_vslidedown_vi_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2i8_nxv2i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 2 x i8> @llvm.riscv.vslidedown.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    i64 9,
+    i64 %2)
+
+  ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_vslidedown_mask_vi_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv2i8_nxv2i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 2 x i8> @llvm.riscv.vslidedown.mask.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    i64 9,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vslidedown.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  i64,
+  i64);
+
+define <vscale x 4 x i8> @intrinsic_vslidedown_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4i8_nxv4i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 4 x i8> @llvm.riscv.vslidedown.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    i64 %2,
+    i64 %3)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vslidedown.mask.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  i64,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i8> @intrinsic_vslidedown_mask_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4i8_nxv4i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 4 x i8> @llvm.riscv.vslidedown.mask.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    i64 %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_vslidedown_vi_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4i8_nxv4i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 4 x i8> @llvm.riscv.vslidedown.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    i64 9,
+    i64 %2)
+
+  ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_vslidedown_mask_vi_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv4i8_nxv4i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 4 x i8> @llvm.riscv.vslidedown.mask.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    i64 9,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vslidedown.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  i64,
+  i64);
+
+define <vscale x 8 x i8> @intrinsic_vslidedown_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8i8_nxv8i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 8 x i8> @llvm.riscv.vslidedown.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    i64 %2,
+    i64 %3)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vslidedown.mask.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  i64,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i8> @intrinsic_vslidedown_mask_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv8i8_nxv8i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 8 x i8> @llvm.riscv.vslidedown.mask.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    i64 %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vslidedown_vi_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8i8_nxv8i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 8 x i8> @llvm.riscv.vslidedown.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    i64 9,
+    i64 %2)
+
+  ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vslidedown_mask_vi_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv8i8_nxv8i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 8 x i8> @llvm.riscv.vslidedown.mask.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    i64 9,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vslidedown.nxv16i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  i64,
+  i64);
+
+define <vscale x 16 x i8> @intrinsic_vslidedown_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vx_nxv16i8_nxv16i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 16 x i8> @llvm.riscv.vslidedown.nxv16i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    i64 %2,
+    i64 %3)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vslidedown.mask.nxv16i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  i64,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i8> @intrinsic_vslidedown_mask_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv16i8_nxv16i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 16 x i8> @llvm.riscv.vslidedown.mask.nxv16i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    i64 %2,
+    <vscale x 16 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_vslidedown_vi_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vi_nxv16i8_nxv16i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 16 x i8> @llvm.riscv.vslidedown.nxv16i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    i64 9,
+    i64 %2)
+
+  ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_vslidedown_mask_vi_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv16i8_nxv16i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 16 x i8> @llvm.riscv.vslidedown.mask.nxv16i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    i64 9,
+    <vscale x 16 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vslidedown.nxv32i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  i64,
+  i64);
+
+define <vscale x 32 x i8> @intrinsic_vslidedown_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vx_nxv32i8_nxv32i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 32 x i8> @llvm.riscv.vslidedown.nxv32i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8> %1,
+    i64 %2,
+    i64 %3)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vslidedown.mask.nxv32i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  i64,
+  <vscale x 32 x i1>,
+  i64);
+
+define <vscale x 32 x i8> @intrinsic_vslidedown_mask_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv32i8_nxv32i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 32 x i8> @llvm.riscv.vslidedown.mask.nxv32i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8> %1,
+    i64 %2,
+    <vscale x 32 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 32 x i8> %a
+}
+
+define <vscale x 32 x i8> @intrinsic_vslidedown_vi_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vi_nxv32i8_nxv32i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 32 x i8> @llvm.riscv.vslidedown.nxv32i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8> %1,
+    i64 9,
+    i64 %2)
+
+  ret <vscale x 32 x i8> %a
+}
+
+define <vscale x 32 x i8> @intrinsic_vslidedown_mask_vi_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv32i8_nxv32i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 32 x i8> @llvm.riscv.vslidedown.mask.nxv32i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8> %1,
+    i64 9,
+    <vscale x 32 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vslidedown.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  i64,
+  i64);
+
+define <vscale x 1 x i16> @intrinsic_vslidedown_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1i16_nxv1i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 1 x i16> @llvm.riscv.vslidedown.nxv1i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    i64 %2,
+    i64 %3)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vslidedown.mask.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  i64,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i16> @intrinsic_vslidedown_mask_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1i16_nxv1i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 1 x i16> @llvm.riscv.vslidedown.mask.nxv1i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    i64 %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 1 x i16> @intrinsic_vslidedown_vi_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1i16_nxv1i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 1 x i16> @llvm.riscv.vslidedown.nxv1i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    i64 9,
+    i64 %2)
+
+  ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 1 x i16> @intrinsic_vslidedown_mask_vi_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv1i16_nxv1i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 1 x i16> @llvm.riscv.vslidedown.mask.nxv1i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    i64 9,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vslidedown.nxv2i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  i64,
+  i64);
+
+define <vscale x 2 x i16> @intrinsic_vslidedown_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2i16_nxv2i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 2 x i16> @llvm.riscv.vslidedown.nxv2i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %1,
+    i64 %2,
+    i64 %3)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vslidedown.mask.nxv2i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  i64,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i16> @intrinsic_vslidedown_mask_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2i16_nxv2i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 2 x i16> @llvm.riscv.vslidedown.mask.nxv2i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %1,
+    i64 %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 2 x i16> @intrinsic_vslidedown_vi_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2i16_nxv2i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 2 x i16> @llvm.riscv.vslidedown.nxv2i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %1,
+    i64 9,
+    i64 %2)
+
+  ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 2 x i16> @intrinsic_vslidedown_mask_vi_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv2i16_nxv2i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 2 x i16> @llvm.riscv.vslidedown.mask.nxv2i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %1,
+    i64 9,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vslidedown.nxv4i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  i64,
+  i64);
+
+define <vscale x 4 x i16> @intrinsic_vslidedown_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4i16_nxv4i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 4 x i16> @llvm.riscv.vslidedown.nxv4i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %1,
+    i64 %2,
+    i64 %3)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vslidedown.mask.nxv4i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  i64,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i16> @intrinsic_vslidedown_mask_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4i16_nxv4i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 4 x i16> @llvm.riscv.vslidedown.mask.nxv4i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %1,
+    i64 %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 4 x i16> @intrinsic_vslidedown_vi_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4i16_nxv4i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 4 x i16> @llvm.riscv.vslidedown.nxv4i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %1,
+    i64 9,
+    i64 %2)
+
+  ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 4 x i16> @intrinsic_vslidedown_mask_vi_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv4i16_nxv4i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 4 x i16> @llvm.riscv.vslidedown.mask.nxv4i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %1,
+    i64 9,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vslidedown.nxv8i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  i64,
+  i64);
+
+define <vscale x 8 x i16> @intrinsic_vslidedown_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8i16_nxv8i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 8 x i16> @llvm.riscv.vslidedown.nxv8i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %1,
+    i64 %2,
+    i64 %3)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vslidedown.mask.nxv8i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  i64,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i16> @intrinsic_vslidedown_mask_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv8i16_nxv8i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 8 x i16> @llvm.riscv.vslidedown.mask.nxv8i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %1,
+    i64 %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 8 x i16> @intrinsic_vslidedown_vi_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8i16_nxv8i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 8 x i16> @llvm.riscv.vslidedown.nxv8i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %1,
+    i64 9,
+    i64 %2)
+
+  ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 8 x i16> @intrinsic_vslidedown_mask_vi_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv8i16_nxv8i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 8 x i16> @llvm.riscv.vslidedown.mask.nxv8i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %1,
+    i64 9,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vslidedown.nxv16i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  i64,
+  i64);
+
+define <vscale x 16 x i16> @intrinsic_vslidedown_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vx_nxv16i16_nxv16i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 16 x i16> @llvm.riscv.vslidedown.nxv16i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %1,
+    i64 %2,
+    i64 %3)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vslidedown.mask.nxv16i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  i64,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i16> @intrinsic_vslidedown_mask_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv16i16_nxv16i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 16 x i16> @llvm.riscv.vslidedown.mask.nxv16i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %1,
+    i64 %2,
+    <vscale x 16 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 16 x i16> @intrinsic_vslidedown_vi_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vi_nxv16i16_nxv16i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 16 x i16> @llvm.riscv.vslidedown.nxv16i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %1,
+    i64 9,
+    i64 %2)
+
+  ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 16 x i16> @intrinsic_vslidedown_mask_vi_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv16i16_nxv16i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 16 x i16> @llvm.riscv.vslidedown.mask.nxv16i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %1,
+    i64 9,
+    <vscale x 16 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vslidedown.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  i64,
+  i64);
+
+define <vscale x 1 x i32> @intrinsic_vslidedown_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1i32_nxv1i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 1 x i32> @llvm.riscv.vslidedown.nxv1i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    i64 %2,
+    i64 %3)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vslidedown.mask.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  i64,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i32> @intrinsic_vslidedown_mask_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1i32_nxv1i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 1 x i32> @llvm.riscv.vslidedown.mask.nxv1i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    i64 %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 1 x i32> @intrinsic_vslidedown_vi_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1i32_nxv1i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 1 x i32> @llvm.riscv.vslidedown.nxv1i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    i64 9,
+    i64 %2)
+
+  ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 1 x i32> @intrinsic_vslidedown_mask_vi_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv1i32_nxv1i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 1 x i32> @llvm.riscv.vslidedown.mask.nxv1i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    i64 9,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vslidedown.nxv2i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  i64,
+  i64);
+
+define <vscale x 2 x i32> @intrinsic_vslidedown_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2i32_nxv2i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 2 x i32> @llvm.riscv.vslidedown.nxv2i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    i64 %2,
+    i64 %3)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vslidedown.mask.nxv2i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  i64,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i32> @intrinsic_vslidedown_mask_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2i32_nxv2i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 2 x i32> @llvm.riscv.vslidedown.mask.nxv2i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    i64 %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 2 x i32> @intrinsic_vslidedown_vi_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2i32_nxv2i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 2 x i32> @llvm.riscv.vslidedown.nxv2i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    i64 9,
+    i64 %2)
+
+  ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 2 x i32> @intrinsic_vslidedown_mask_vi_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv2i32_nxv2i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 2 x i32> @llvm.riscv.vslidedown.mask.nxv2i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    i64 9,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vslidedown.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  i64,
+  i64);
+
+define <vscale x 4 x i32> @intrinsic_vslidedown_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4i32_nxv4i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 4 x i32> @llvm.riscv.vslidedown.nxv4i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    i64 %2,
+    i64 %3)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vslidedown.mask.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  i64,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i32> @intrinsic_vslidedown_mask_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4i32_nxv4i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 4 x i32> @llvm.riscv.vslidedown.mask.nxv4i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    i64 %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 4 x i32> @intrinsic_vslidedown_vi_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4i32_nxv4i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 4 x i32> @llvm.riscv.vslidedown.nxv4i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    i64 9,
+    i64 %2)
+
+  ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 4 x i32> @intrinsic_vslidedown_mask_vi_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv4i32_nxv4i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 4 x i32> @llvm.riscv.vslidedown.mask.nxv4i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    i64 9,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vslidedown.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  i64,
+  i64);
+
+define <vscale x 8 x i32> @intrinsic_vslidedown_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8i32_nxv8i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 8 x i32> @llvm.riscv.vslidedown.nxv8i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    i64 %2,
+    i64 %3)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vslidedown.mask.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  i64,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i32> @intrinsic_vslidedown_mask_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv8i32_nxv8i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 8 x i32> @llvm.riscv.vslidedown.mask.nxv8i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    i64 %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 8 x i32> @intrinsic_vslidedown_vi_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8i32_nxv8i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 8 x i32> @llvm.riscv.vslidedown.nxv8i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    i64 9,
+    i64 %2)
+
+  ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 8 x i32> @intrinsic_vslidedown_mask_vi_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv8i32_nxv8i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 8 x i32> @llvm.riscv.vslidedown.mask.nxv8i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    i64 9,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vslidedown.nxv1i64(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>,
+  i64,
+  i64);
+
+define <vscale x 1 x i64> @intrinsic_vslidedown_vx_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1i64_nxv1i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 1 x i64> @llvm.riscv.vslidedown.nxv1i64(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64> %1,
+    i64 %2,
+    i64 %3)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vslidedown.mask.nxv1i64(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>,
+  i64,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i64> @intrinsic_vslidedown_mask_vx_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1i64_nxv1i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 1 x i64> @llvm.riscv.vslidedown.mask.nxv1i64(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64> %1,
+    i64 %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i64> %a
+}
+
+define <vscale x 1 x i64> @intrinsic_vslidedown_vi_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1i64_nxv1i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 1 x i64> @llvm.riscv.vslidedown.nxv1i64(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64> %1,
+    i64 9,
+    i64 %2)
+
+  ret <vscale x 1 x i64> %a
+}
+
+define <vscale x 1 x i64> @intrinsic_vslidedown_mask_vi_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv1i64_nxv1i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 1 x i64> @llvm.riscv.vslidedown.mask.nxv1i64(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64> %1,
+    i64 9,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vslidedown.nxv2i64(
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>,
+  i64,
+  i64);
+
+define <vscale x 2 x i64> @intrinsic_vslidedown_vx_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2i64_nxv2i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 2 x i64> @llvm.riscv.vslidedown.nxv2i64(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64> %1,
+    i64 %2,
+    i64 %3)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vslidedown.mask.nxv2i64(
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>,
+  i64,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i64> @intrinsic_vslidedown_mask_vx_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2i64_nxv2i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 2 x i64> @llvm.riscv.vslidedown.mask.nxv2i64(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64> %1,
+    i64 %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i64> %a
+}
+
+define <vscale x 2 x i64> @intrinsic_vslidedown_vi_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2i64_nxv2i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 2 x i64> @llvm.riscv.vslidedown.nxv2i64(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64> %1,
+    i64 9,
+    i64 %2)
+
+  ret <vscale x 2 x i64> %a
+}
+
+define <vscale x 2 x i64> @intrinsic_vslidedown_mask_vi_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv2i64_nxv2i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 2 x i64> @llvm.riscv.vslidedown.mask.nxv2i64(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64> %1,
+    i64 9,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vslidedown.nxv4i64(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  i64,
+  i64);
+
+define <vscale x 4 x i64> @intrinsic_vslidedown_vx_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4i64_nxv4i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 4 x i64> @llvm.riscv.vslidedown.nxv4i64(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64> %1,
+    i64 %2,
+    i64 %3)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vslidedown.mask.nxv4i64(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  i64,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i64> @intrinsic_vslidedown_mask_vx_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4i64_nxv4i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 4 x i64> @llvm.riscv.vslidedown.mask.nxv4i64(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64> %1,
+    i64 %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i64> %a
+}
+
+define <vscale x 4 x i64> @intrinsic_vslidedown_vi_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4i64_nxv4i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 4 x i64> @llvm.riscv.vslidedown.nxv4i64(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64> %1,
+    i64 9,
+    i64 %2)
+
+  ret <vscale x 4 x i64> %a
+}
+
+define <vscale x 4 x i64> @intrinsic_vslidedown_mask_vi_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv4i64_nxv4i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 4 x i64> @llvm.riscv.vslidedown.mask.nxv4i64(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64> %1,
+    i64 9,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i64> %a
+}
+
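+; The floating-point tests below follow the same quartet pattern as the
+; integer ones; the slide operates on whole elements, so the f16/f32/f64
+; vectors simply reuse the corresponding e16/e32/e64 vtype settings.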
+declare <vscale x 1 x half> @llvm.riscv.vslidedown.nxv1f16(
+  <vscale x 1 x half>,
+  <vscale x 1 x half>,
+  i64,
+  i64);
+
+define <vscale x 1 x half> @intrinsic_vslidedown_vx_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1f16_nxv1f16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 1 x half> @llvm.riscv.vslidedown.nxv1f16(
+    <vscale x 1 x half> %0,
+    <vscale x 1 x half> %1,
+    i64 %2,
+    i64 %3)
+
+  ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vslidedown.mask.nxv1f16(
+  <vscale x 1 x half>,
+  <vscale x 1 x half>,
+  i64,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x half> @intrinsic_vslidedown_mask_vx_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1f16_nxv1f16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 1 x half> @llvm.riscv.vslidedown.mask.nxv1f16(
+    <vscale x 1 x half> %0,
+    <vscale x 1 x half> %1,
+    i64 %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x half> %a
+}
+
+define <vscale x 1 x half> @intrinsic_vslidedown_vi_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1f16_nxv1f16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 1 x half> @llvm.riscv.vslidedown.nxv1f16(
+    <vscale x 1 x half> %0,
+    <vscale x 1 x half> %1,
+    i64 9,
+    i64 %2)
+
+  ret <vscale x 1 x half> %a
+}
+
+define <vscale x 1 x half> @intrinsic_vslidedown_mask_vi_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv1f16_nxv1f16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 1 x half> @llvm.riscv.vslidedown.mask.nxv1f16(
+    <vscale x 1 x half> %0,
+    <vscale x 1 x half> %1,
+    i64 9,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vslidedown.nxv2f16(
+  <vscale x 2 x half>,
+  <vscale x 2 x half>,
+  i64,
+  i64);
+
+define <vscale x 2 x half> @intrinsic_vslidedown_vx_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2f16_nxv2f16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 2 x half> @llvm.riscv.vslidedown.nxv2f16(
+    <vscale x 2 x half> %0,
+    <vscale x 2 x half> %1,
+    i64 %2,
+    i64 %3)
+
+  ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vslidedown.mask.nxv2f16(
+  <vscale x 2 x half>,
+  <vscale x 2 x half>,
+  i64,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x half> @intrinsic_vslidedown_mask_vx_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2f16_nxv2f16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 2 x half> @llvm.riscv.vslidedown.mask.nxv2f16(
+    <vscale x 2 x half> %0,
+    <vscale x 2 x half> %1,
+    i64 %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x half> %a
+}
+
+define <vscale x 2 x half> @intrinsic_vslidedown_vi_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2f16_nxv2f16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 2 x half> @llvm.riscv.vslidedown.nxv2f16(
+    <vscale x 2 x half> %0,
+    <vscale x 2 x half> %1,
+    i64 9,
+    i64 %2)
+
+  ret <vscale x 2 x half> %a
+}
+
+define <vscale x 2 x half> @intrinsic_vslidedown_mask_vi_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv2f16_nxv2f16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 2 x half> @llvm.riscv.vslidedown.mask.nxv2f16(
+    <vscale x 2 x half> %0,
+    <vscale x 2 x half> %1,
+    i64 9,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vslidedown.nxv4f16(
+  <vscale x 4 x half>,
+  <vscale x 4 x half>,
+  i64,
+  i64);
+
+define <vscale x 4 x half> @intrinsic_vslidedown_vx_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4f16_nxv4f16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 4 x half> @llvm.riscv.vslidedown.nxv4f16(
+    <vscale x 4 x half> %0,
+    <vscale x 4 x half> %1,
+    i64 %2,
+    i64 %3)
+
+  ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vslidedown.mask.nxv4f16(
+  <vscale x 4 x half>,
+  <vscale x 4 x half>,
+  i64,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x half> @intrinsic_vslidedown_mask_vx_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4f16_nxv4f16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 4 x half> @llvm.riscv.vslidedown.mask.nxv4f16(
+    <vscale x 4 x half> %0,
+    <vscale x 4 x half> %1,
+    i64 %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x half> %a
+}
+
+define <vscale x 4 x half> @intrinsic_vslidedown_vi_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4f16_nxv4f16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 4 x half> @llvm.riscv.vslidedown.nxv4f16(
+    <vscale x 4 x half> %0,
+    <vscale x 4 x half> %1,
+    i64 9,
+    i64 %2)
+
+  ret <vscale x 4 x half> %a
+}
+
+define <vscale x 4 x half> @intrinsic_vslidedown_mask_vi_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv4f16_nxv4f16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 4 x half> @llvm.riscv.vslidedown.mask.nxv4f16(
+    <vscale x 4 x half> %0,
+    <vscale x 4 x half> %1,
+    i64 9,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vslidedown.nxv8f16(
+  <vscale x 8 x half>,
+  <vscale x 8 x half>,
+  i64,
+  i64);
+
+define <vscale x 8 x half> @intrinsic_vslidedown_vx_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8f16_nxv8f16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 8 x half> @llvm.riscv.vslidedown.nxv8f16(
+    <vscale x 8 x half> %0,
+    <vscale x 8 x half> %1,
+    i64 %2,
+    i64 %3)
+
+  ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vslidedown.mask.nxv8f16(
+  <vscale x 8 x half>,
+  <vscale x 8 x half>,
+  i64,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x half> @intrinsic_vslidedown_mask_vx_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv8f16_nxv8f16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 8 x half> @llvm.riscv.vslidedown.mask.nxv8f16(
+    <vscale x 8 x half> %0,
+    <vscale x 8 x half> %1,
+    i64 %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x half> %a
+}
+
+define <vscale x 8 x half> @intrinsic_vslidedown_vi_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8f16_nxv8f16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 8 x half> @llvm.riscv.vslidedown.nxv8f16(
+    <vscale x 8 x half> %0,
+    <vscale x 8 x half> %1,
+    i64 9,
+    i64 %2)
+
+  ret <vscale x 8 x half> %a
+}
+
+define <vscale x 8 x half> @intrinsic_vslidedown_mask_vi_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv8f16_nxv8f16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 8 x half> @llvm.riscv.vslidedown.mask.nxv8f16(
+    <vscale x 8 x half> %0,
+    <vscale x 8 x half> %1,
+    i64 9,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vslidedown.nxv16f16(
+  <vscale x 16 x half>,
+  <vscale x 16 x half>,
+  i64,
+  i64);
+
+define <vscale x 16 x half> @intrinsic_vslidedown_vx_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vx_nxv16f16_nxv16f16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 16 x half> @llvm.riscv.vslidedown.nxv16f16(
+    <vscale x 16 x half> %0,
+    <vscale x 16 x half> %1,
+    i64 %2,
+    i64 %3)
+
+  ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vslidedown.mask.nxv16f16(
+  <vscale x 16 x half>,
+  <vscale x 16 x half>,
+  i64,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x half> @intrinsic_vslidedown_mask_vx_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv16f16_nxv16f16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 16 x half> @llvm.riscv.vslidedown.mask.nxv16f16(
+    <vscale x 16 x half> %0,
+    <vscale x 16 x half> %1,
+    i64 %2,
+    <vscale x 16 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 16 x half> %a
+}
+
+define <vscale x 16 x half> @intrinsic_vslidedown_vi_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vi_nxv16f16_nxv16f16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 16 x half> @llvm.riscv.vslidedown.nxv16f16(
+    <vscale x 16 x half> %0,
+    <vscale x 16 x half> %1,
+    i64 9,
+    i64 %2)
+
+  ret <vscale x 16 x half> %a
+}
+
+define <vscale x 16 x half> @intrinsic_vslidedown_mask_vi_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv16f16_nxv16f16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 16 x half> @llvm.riscv.vslidedown.mask.nxv16f16(
+    <vscale x 16 x half> %0,
+    <vscale x 16 x half> %1,
+    i64 9,
+    <vscale x 16 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vslidedown.nxv1f32(
+  <vscale x 1 x float>,
+  <vscale x 1 x float>,
+  i64,
+  i64);
+
+define <vscale x 1 x float> @intrinsic_vslidedown_vx_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1f32_nxv1f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 1 x float> @llvm.riscv.vslidedown.nxv1f32(
+    <vscale x 1 x float> %0,
+    <vscale x 1 x float> %1,
+    i64 %2,
+    i64 %3)
+
+  ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vslidedown.mask.nxv1f32(
+  <vscale x 1 x float>,
+  <vscale x 1 x float>,
+  i64,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x float> @intrinsic_vslidedown_mask_vx_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1f32_nxv1f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 1 x float> @llvm.riscv.vslidedown.mask.nxv1f32(
+    <vscale x 1 x float> %0,
+    <vscale x 1 x float> %1,
+    i64 %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x float> %a
+}
+
+define <vscale x 1 x float> @intrinsic_vslidedown_vi_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1f32_nxv1f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 1 x float> @llvm.riscv.vslidedown.nxv1f32(
+    <vscale x 1 x float> %0,
+    <vscale x 1 x float> %1,
+    i64 9,
+    i64 %2)
+
+  ret <vscale x 1 x float> %a
+}
+
+define <vscale x 1 x float> @intrinsic_vslidedown_mask_vi_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv1f32_nxv1f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 1 x float> @llvm.riscv.vslidedown.mask.nxv1f32(
+    <vscale x 1 x float> %0,
+    <vscale x 1 x float> %1,
+    i64 9,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vslidedown.nxv2f32(
+  <vscale x 2 x float>,
+  <vscale x 2 x float>,
+  i64,
+  i64);
+
+define <vscale x 2 x float> @intrinsic_vslidedown_vx_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2f32_nxv2f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 2 x float> @llvm.riscv.vslidedown.nxv2f32(
+    <vscale x 2 x float> %0,
+    <vscale x 2 x float> %1,
+    i64 %2,
+    i64 %3)
+
+  ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vslidedown.mask.nxv2f32(
+  <vscale x 2 x float>,
+  <vscale x 2 x float>,
+  i64,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x float> @intrinsic_vslidedown_mask_vx_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2f32_nxv2f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 2 x float> @llvm.riscv.vslidedown.mask.nxv2f32(
+    <vscale x 2 x float> %0,
+    <vscale x 2 x float> %1,
+    i64 %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x float> %a
+}
+
+define <vscale x 2 x float> @intrinsic_vslidedown_vi_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2f32_nxv2f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 2 x float> @llvm.riscv.vslidedown.nxv2f32(
+    <vscale x 2 x float> %0,
+    <vscale x 2 x float> %1,
+    i64 9,
+    i64 %2)
+
+  ret <vscale x 2 x float> %a
+}
+
+define <vscale x 2 x float> @intrinsic_vslidedown_mask_vi_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv2f32_nxv2f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 2 x float> @llvm.riscv.vslidedown.mask.nxv2f32(
+    <vscale x 2 x float> %0,
+    <vscale x 2 x float> %1,
+    i64 9,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vslidedown.nxv4f32(
+  <vscale x 4 x float>,
+  <vscale x 4 x float>,
+  i64,
+  i64);
+
+define <vscale x 4 x float> @intrinsic_vslidedown_vx_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4f32_nxv4f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 4 x float> @llvm.riscv.vslidedown.nxv4f32(
+    <vscale x 4 x float> %0,
+    <vscale x 4 x float> %1,
+    i64 %2,
+    i64 %3)
+
+  ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vslidedown.mask.nxv4f32(
+  <vscale x 4 x float>,
+  <vscale x 4 x float>,
+  i64,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x float> @intrinsic_vslidedown_mask_vx_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4f32_nxv4f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 4 x float> @llvm.riscv.vslidedown.mask.nxv4f32(
+    <vscale x 4 x float> %0,
+    <vscale x 4 x float> %1,
+    i64 %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x float> %a
+}
+
+define <vscale x 4 x float> @intrinsic_vslidedown_vi_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4f32_nxv4f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 4 x float> @llvm.riscv.vslidedown.nxv4f32(
+    <vscale x 4 x float> %0,
+    <vscale x 4 x float> %1,
+    i64 9,
+    i64 %2)
+
+  ret <vscale x 4 x float> %a
+}
+
+define <vscale x 4 x float> @intrinsic_vslidedown_mask_vi_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv4f32_nxv4f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 4 x float> @llvm.riscv.vslidedown.mask.nxv4f32(
+    <vscale x 4 x float> %0,
+    <vscale x 4 x float> %1,
+    i64 9,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vslidedown.nxv8f32(
+  <vscale x 8 x float>,
+  <vscale x 8 x float>,
+  i64,
+  i64);
+
+define <vscale x 8 x float> @intrinsic_vslidedown_vx_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8f32_nxv8f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 8 x float> @llvm.riscv.vslidedown.nxv8f32(
+    <vscale x 8 x float> %0,
+    <vscale x 8 x float> %1,
+    i64 %2,
+    i64 %3)
+
+  ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vslidedown.mask.nxv8f32(
+  <vscale x 8 x float>,
+  <vscale x 8 x float>,
+  i64,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x float> @intrinsic_vslidedown_mask_vx_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv8f32_nxv8f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 8 x float> @llvm.riscv.vslidedown.mask.nxv8f32(
+    <vscale x 8 x float> %0,
+    <vscale x 8 x float> %1,
+    i64 %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x float> %a
+}
+
+define <vscale x 8 x float> @intrinsic_vslidedown_vi_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8f32_nxv8f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 8 x float> @llvm.riscv.vslidedown.nxv8f32(
+    <vscale x 8 x float> %0,
+    <vscale x 8 x float> %1,
+    i64 9,
+    i64 %2)
+
+  ret <vscale x 8 x float> %a
+}
+
+define <vscale x 8 x float> @intrinsic_vslidedown_mask_vi_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv8f32_nxv8f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 8 x float> @llvm.riscv.vslidedown.mask.nxv8f32(
+    <vscale x 8 x float> %0,
+    <vscale x 8 x float> %1,
+    i64 9,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vslidedown.nxv1f64(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  i64,
+  i64);
+
+define <vscale x 1 x double> @intrinsic_vslidedown_vx_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1f64_nxv1f64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 1 x double> @llvm.riscv.vslidedown.nxv1f64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %1,
+    i64 %2,
+    i64 %3)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vslidedown.mask.nxv1f64(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  i64,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x double> @intrinsic_vslidedown_mask_vx_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1f64_nxv1f64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 1 x double> @llvm.riscv.vslidedown.mask.nxv1f64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %1,
+    i64 %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x double> %a
+}
+
+define <vscale x 1 x double> @intrinsic_vslidedown_vi_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1f64_nxv1f64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 1 x double> @llvm.riscv.vslidedown.nxv1f64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %1,
+    i64 9,
+    i64 %2)
+
+  ret <vscale x 1 x double> %a
+}
+
+define <vscale x 1 x double> @intrinsic_vslidedown_mask_vi_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv1f64_nxv1f64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 1 x double> @llvm.riscv.vslidedown.mask.nxv1f64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %1,
+    i64 9,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vslidedown.nxv2f64(
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  i64,
+  i64);
+
+define <vscale x 2 x double> @intrinsic_vslidedown_vx_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2f64_nxv2f64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 2 x double> @llvm.riscv.vslidedown.nxv2f64(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double> %1,
+    i64 %2,
+    i64 %3)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vslidedown.mask.nxv2f64(
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  i64,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x double> @intrinsic_vslidedown_mask_vx_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2f64_nxv2f64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 2 x double> @llvm.riscv.vslidedown.mask.nxv2f64(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double> %1,
+    i64 %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x double> %a
+}
+
+define <vscale x 2 x double> @intrinsic_vslidedown_vi_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2f64_nxv2f64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 2 x double> @llvm.riscv.vslidedown.nxv2f64(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double> %1,
+    i64 9,
+    i64 %2)
+
+  ret <vscale x 2 x double> %a
+}
+
+define <vscale x 2 x double> @intrinsic_vslidedown_mask_vi_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv2f64_nxv2f64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 2 x double> @llvm.riscv.vslidedown.mask.nxv2f64(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double> %1,
+    i64 9,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vslidedown.nxv4f64(
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  i64,
+  i64);
+
+define <vscale x 4 x double> @intrinsic_vslidedown_vx_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4f64_nxv4f64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 4 x double> @llvm.riscv.vslidedown.nxv4f64(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double> %1,
+    i64 %2,
+    i64 %3)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vslidedown.mask.nxv4f64(
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  i64,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x double> @intrinsic_vslidedown_mask_vx_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4f64_nxv4f64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 4 x double> @llvm.riscv.vslidedown.mask.nxv4f64(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double> %1,
+    i64 %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x double> %a
+}
+
+define <vscale x 4 x double> @intrinsic_vslidedown_vi_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4f64_nxv4f64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 4 x double> @llvm.riscv.vslidedown.nxv4f64(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double> %1,
+    i64 9,
+    i64 %2)
+
+  ret <vscale x 4 x double> %a
+}
+
+define <vscale x 4 x double> @intrinsic_vslidedown_mask_vi_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv4f64_nxv4f64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 4 x double> @llvm.riscv.vslidedown.mask.nxv4f64(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double> %1,
+    i64 9,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x double> %a
+}

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vslideup-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vslideup-rv32.ll
new file mode 100644
index 000000000000..6a589eaafeaa
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vslideup-rv32.ll
@@ -0,0 +1,1705 @@
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
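+
+; Each vector type below is exercised four ways: vslideup.vx with the slide
+; amount in a scalar register and vslideup.vi with a 5-bit immediate (9 in
+; these tests), each in unmasked and masked (v0.t) forms. The intrinsic
+; operands are (dest, src, offset, vl); the masked form takes an extra mask
+; operand before vl.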
+declare <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  i32,
+  i32);
+
+define <vscale x 1 x i8> @intrinsic_vslideup_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv1i8_nxv1i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    i32 %2,
+    i32 %3)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vslideup.mask.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  i32,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i8> @intrinsic_vslideup_mask_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1i8_nxv1i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 1 x i8> @llvm.riscv.vslideup.mask.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    i32 %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 1 x i8> @intrinsic_vslideup_vi_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv1i8_nxv1i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    i32 9,
+    i32 %2)
+
+  ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 1 x i8> @intrinsic_vslideup_mask_vi_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv1i8_nxv1i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 1 x i8> @llvm.riscv.vslideup.mask.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    i32 9,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x i8> %a
+}
+
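+; The same four tests repeat for each wider i8 type and for the i16 and i32
+; element types, stepping through the LMUL settings visible in the vsetvli
+; CHECK lines.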
+declare <vscale x 2 x i8> @llvm.riscv.vslideup.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  i32,
+  i32);
+
+define <vscale x 2 x i8> @intrinsic_vslideup_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv2i8_nxv2i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 2 x i8> @llvm.riscv.vslideup.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    i32 %2,
+    i32 %3)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vslideup.mask.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  i32,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i8> @intrinsic_vslideup_mask_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2i8_nxv2i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 2 x i8> @llvm.riscv.vslideup.mask.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    i32 %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_vslideup_vi_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv2i8_nxv2i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 2 x i8> @llvm.riscv.vslideup.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    i32 9,
+    i32 %2)
+
+  ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_vslideup_mask_vi_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv2i8_nxv2i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 2 x i8> @llvm.riscv.vslideup.mask.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    i32 9,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vslideup.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  i32,
+  i32);
+
+define <vscale x 4 x i8> @intrinsic_vslideup_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv4i8_nxv4i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 4 x i8> @llvm.riscv.vslideup.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    i32 %2,
+    i32 %3)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vslideup.mask.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  i32,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i8> @intrinsic_vslideup_mask_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4i8_nxv4i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 4 x i8> @llvm.riscv.vslideup.mask.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    i32 %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_vslideup_vi_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv4i8_nxv4i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 4 x i8> @llvm.riscv.vslideup.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    i32 9,
+    i32 %2)
+
+  ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_vslideup_mask_vi_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv4i8_nxv4i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 4 x i8> @llvm.riscv.vslideup.mask.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    i32 9,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vslideup.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  i32,
+  i32);
+
+define <vscale x 8 x i8> @intrinsic_vslideup_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv8i8_nxv8i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 8 x i8> @llvm.riscv.vslideup.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    i32 %2,
+    i32 %3)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vslideup.mask.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  i32,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i8> @intrinsic_vslideup_mask_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv8i8_nxv8i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 8 x i8> @llvm.riscv.vslideup.mask.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    i32 %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vslideup_vi_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv8i8_nxv8i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 8 x i8> @llvm.riscv.vslideup.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    i32 9,
+    i32 %2)
+
+  ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vslideup_mask_vi_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv8i8_nxv8i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 8 x i8> @llvm.riscv.vslideup.mask.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    i32 9,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vslideup.nxv16i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  i32,
+  i32);
+
+define <vscale x 16 x i8> @intrinsic_vslideup_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv16i8_nxv16i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 16 x i8> @llvm.riscv.vslideup.nxv16i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    i32 %2,
+    i32 %3)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vslideup.mask.nxv16i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  i32,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i8> @intrinsic_vslideup_mask_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv16i8_nxv16i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 16 x i8> @llvm.riscv.vslideup.mask.nxv16i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    i32 %2,
+    <vscale x 16 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_vslideup_vi_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv16i8_nxv16i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 16 x i8> @llvm.riscv.vslideup.nxv16i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    i32 9,
+    i32 %2)
+
+  ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_vslideup_mask_vi_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv16i8_nxv16i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 16 x i8> @llvm.riscv.vslideup.mask.nxv16i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    i32 9,
+    <vscale x 16 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vslideup.nxv32i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  i32,
+  i32);
+
+define <vscale x 32 x i8> @intrinsic_vslideup_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv32i8_nxv32i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 32 x i8> @llvm.riscv.vslideup.nxv32i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8> %1,
+    i32 %2,
+    i32 %3)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vslideup.mask.nxv32i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  i32,
+  <vscale x 32 x i1>,
+  i32);
+
+define <vscale x 32 x i8> @intrinsic_vslideup_mask_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv32i8_nxv32i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 32 x i8> @llvm.riscv.vslideup.mask.nxv32i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8> %1,
+    i32 %2,
+    <vscale x 32 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 32 x i8> %a
+}
+
+define <vscale x 32 x i8> @intrinsic_vslideup_vi_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv32i8_nxv32i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 32 x i8> @llvm.riscv.vslideup.nxv32i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8> %1,
+    i32 9,
+    i32 %2)
+
+  ret <vscale x 32 x i8> %a
+}
+
+define <vscale x 32 x i8> @intrinsic_vslideup_mask_vi_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv32i8_nxv32i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 32 x i8> @llvm.riscv.vslideup.mask.nxv32i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8> %1,
+    i32 9,
+    <vscale x 32 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vslideup.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  i32,
+  i32);
+
+define <vscale x 1 x i16> @intrinsic_vslideup_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv1i16_nxv1i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 1 x i16> @llvm.riscv.vslideup.nxv1i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    i32 %2,
+    i32 %3)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vslideup.mask.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  i32,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i16> @intrinsic_vslideup_mask_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1i16_nxv1i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 1 x i16> @llvm.riscv.vslideup.mask.nxv1i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    i32 %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 1 x i16> @intrinsic_vslideup_vi_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv1i16_nxv1i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 1 x i16> @llvm.riscv.vslideup.nxv1i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    i32 9,
+    i32 %2)
+
+  ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 1 x i16> @intrinsic_vslideup_mask_vi_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv1i16_nxv1i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 1 x i16> @llvm.riscv.vslideup.mask.nxv1i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    i32 9,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vslideup.nxv2i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  i32,
+  i32);
+
+define <vscale x 2 x i16> @intrinsic_vslideup_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv2i16_nxv2i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 2 x i16> @llvm.riscv.vslideup.nxv2i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %1,
+    i32 %2,
+    i32 %3)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vslideup.mask.nxv2i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  i32,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i16> @intrinsic_vslideup_mask_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2i16_nxv2i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 2 x i16> @llvm.riscv.vslideup.mask.nxv2i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %1,
+    i32 %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 2 x i16> @intrinsic_vslideup_vi_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv2i16_nxv2i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 2 x i16> @llvm.riscv.vslideup.nxv2i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %1,
+    i32 9,
+    i32 %2)
+
+  ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 2 x i16> @intrinsic_vslideup_mask_vi_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv2i16_nxv2i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 2 x i16> @llvm.riscv.vslideup.mask.nxv2i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %1,
+    i32 9,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vslideup.nxv4i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  i32,
+  i32);
+
+define <vscale x 4 x i16> @intrinsic_vslideup_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv4i16_nxv4i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 4 x i16> @llvm.riscv.vslideup.nxv4i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %1,
+    i32 %2,
+    i32 %3)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vslideup.mask.nxv4i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  i32,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i16> @intrinsic_vslideup_mask_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4i16_nxv4i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 4 x i16> @llvm.riscv.vslideup.mask.nxv4i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %1,
+    i32 %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 4 x i16> @intrinsic_vslideup_vi_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv4i16_nxv4i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 4 x i16> @llvm.riscv.vslideup.nxv4i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %1,
+    i32 9,
+    i32 %2)
+
+  ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 4 x i16> @intrinsic_vslideup_mask_vi_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv4i16_nxv4i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 4 x i16> @llvm.riscv.vslideup.mask.nxv4i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %1,
+    i32 9,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vslideup.nxv8i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  i32,
+  i32);
+
+define <vscale x 8 x i16> @intrinsic_vslideup_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv8i16_nxv8i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 8 x i16> @llvm.riscv.vslideup.nxv8i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %1,
+    i32 %2,
+    i32 %3)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vslideup.mask.nxv8i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  i32,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i16> @intrinsic_vslideup_mask_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv8i16_nxv8i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 8 x i16> @llvm.riscv.vslideup.mask.nxv8i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %1,
+    i32 %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 8 x i16> @intrinsic_vslideup_vi_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv8i16_nxv8i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 8 x i16> @llvm.riscv.vslideup.nxv8i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %1,
+    i32 9,
+    i32 %2)
+
+  ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 8 x i16> @intrinsic_vslideup_mask_vi_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv8i16_nxv8i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 8 x i16> @llvm.riscv.vslideup.mask.nxv8i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %1,
+    i32 9,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vslideup.nxv16i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  i32,
+  i32);
+
+define <vscale x 16 x i16> @intrinsic_vslideup_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv16i16_nxv16i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 16 x i16> @llvm.riscv.vslideup.nxv16i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %1,
+    i32 %2,
+    i32 %3)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vslideup.mask.nxv16i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  i32,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i16> @intrinsic_vslideup_mask_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv16i16_nxv16i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 16 x i16> @llvm.riscv.vslideup.mask.nxv16i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %1,
+    i32 %2,
+    <vscale x 16 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 16 x i16> @intrinsic_vslideup_vi_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv16i16_nxv16i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 16 x i16> @llvm.riscv.vslideup.nxv16i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %1,
+    i32 9,
+    i32 %2)
+
+  ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 16 x i16> @intrinsic_vslideup_mask_vi_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv16i16_nxv16i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 16 x i16> @llvm.riscv.vslideup.mask.nxv16i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %1,
+    i32 9,
+    <vscale x 16 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vslideup.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  i32,
+  i32);
+
+define <vscale x 1 x i32> @intrinsic_vslideup_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv1i32_nxv1i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 1 x i32> @llvm.riscv.vslideup.nxv1i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    i32 %2,
+    i32 %3)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vslideup.mask.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  i32,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i32> @intrinsic_vslideup_mask_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1i32_nxv1i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 1 x i32> @llvm.riscv.vslideup.mask.nxv1i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    i32 %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 1 x i32> @intrinsic_vslideup_vi_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv1i32_nxv1i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 1 x i32> @llvm.riscv.vslideup.nxv1i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    i32 9,
+    i32 %2)
+
+  ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 1 x i32> @intrinsic_vslideup_mask_vi_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv1i32_nxv1i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 1 x i32> @llvm.riscv.vslideup.mask.nxv1i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    i32 9,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vslideup.nxv2i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  i32,
+  i32);
+
+define <vscale x 2 x i32> @intrinsic_vslideup_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv2i32_nxv2i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 2 x i32> @llvm.riscv.vslideup.nxv2i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    i32 %2,
+    i32 %3)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vslideup.mask.nxv2i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  i32,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i32> @intrinsic_vslideup_mask_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2i32_nxv2i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 2 x i32> @llvm.riscv.vslideup.mask.nxv2i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    i32 %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 2 x i32> @intrinsic_vslideup_vi_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv2i32_nxv2i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 2 x i32> @llvm.riscv.vslideup.nxv2i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    i32 9,
+    i32 %2)
+
+  ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 2 x i32> @intrinsic_vslideup_mask_vi_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv2i32_nxv2i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 2 x i32> @llvm.riscv.vslideup.mask.nxv2i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    i32 9,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vslideup.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  i32,
+  i32);
+
+define <vscale x 4 x i32> @intrinsic_vslideup_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv4i32_nxv4i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 4 x i32> @llvm.riscv.vslideup.nxv4i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    i32 %2,
+    i32 %3)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vslideup.mask.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  i32,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i32> @intrinsic_vslideup_mask_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4i32_nxv4i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 4 x i32> @llvm.riscv.vslideup.mask.nxv4i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    i32 %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 4 x i32> @intrinsic_vslideup_vi_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv4i32_nxv4i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 4 x i32> @llvm.riscv.vslideup.nxv4i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    i32 9,
+    i32 %2)
+
+  ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 4 x i32> @intrinsic_vslideup_mask_vi_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv4i32_nxv4i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 4 x i32> @llvm.riscv.vslideup.mask.nxv4i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    i32 9,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vslideup.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  i32,
+  i32);
+
+define <vscale x 8 x i32> @intrinsic_vslideup_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv8i32_nxv8i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 8 x i32> @llvm.riscv.vslideup.nxv8i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    i32 %2,
+    i32 %3)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vslideup.mask.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  i32,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i32> @intrinsic_vslideup_mask_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv8i32_nxv8i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 8 x i32> @llvm.riscv.vslideup.mask.nxv8i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    i32 %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 8 x i32> @intrinsic_vslideup_vi_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv8i32_nxv8i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 8 x i32> @llvm.riscv.vslideup.nxv8i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    i32 9,
+    i32 %2)
+
+  ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 8 x i32> @intrinsic_vslideup_mask_vi_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv8i32_nxv8i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 8 x i32> @llvm.riscv.vslideup.mask.nxv8i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    i32 9,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vslideup.nxv1f16(
+  <vscale x 1 x half>,
+  <vscale x 1 x half>,
+  i32,
+  i32);
+
+define <vscale x 1 x half> @intrinsic_vslideup_vx_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i32 %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv1f16_nxv1f16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 1 x half> @llvm.riscv.vslideup.nxv1f16(
+    <vscale x 1 x half> %0,
+    <vscale x 1 x half> %1,
+    i32 %2,
+    i32 %3)
+
+  ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vslideup.mask.nxv1f16(
+  <vscale x 1 x half>,
+  <vscale x 1 x half>,
+  i32,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x half> @intrinsic_vslideup_mask_vx_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1f16_nxv1f16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 1 x half> @llvm.riscv.vslideup.mask.nxv1f16(
+    <vscale x 1 x half> %0,
+    <vscale x 1 x half> %1,
+    i32 %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x half> %a
+}
+
+define <vscale x 1 x half> @intrinsic_vslideup_vi_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv1f16_nxv1f16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 1 x half> @llvm.riscv.vslideup.nxv1f16(
+    <vscale x 1 x half> %0,
+    <vscale x 1 x half> %1,
+    i32 9,
+    i32 %2)
+
+  ret <vscale x 1 x half> %a
+}
+
+define <vscale x 1 x half> @intrinsic_vslideup_mask_vi_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv1f16_nxv1f16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 1 x half> @llvm.riscv.vslideup.mask.nxv1f16(
+    <vscale x 1 x half> %0,
+    <vscale x 1 x half> %1,
+    i32 9,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vslideup.nxv2f16(
+  <vscale x 2 x half>,
+  <vscale x 2 x half>,
+  i32,
+  i32);
+
+define <vscale x 2 x half> @intrinsic_vslideup_vx_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i32 %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv2f16_nxv2f16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 2 x half> @llvm.riscv.vslideup.nxv2f16(
+    <vscale x 2 x half> %0,
+    <vscale x 2 x half> %1,
+    i32 %2,
+    i32 %3)
+
+  ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vslideup.mask.nxv2f16(
+  <vscale x 2 x half>,
+  <vscale x 2 x half>,
+  i32,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x half> @intrinsic_vslideup_mask_vx_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2f16_nxv2f16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 2 x half> @llvm.riscv.vslideup.mask.nxv2f16(
+    <vscale x 2 x half> %0,
+    <vscale x 2 x half> %1,
+    i32 %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x half> %a
+}
+
+define <vscale x 2 x half> @intrinsic_vslideup_vi_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv2f16_nxv2f16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 2 x half> @llvm.riscv.vslideup.nxv2f16(
+    <vscale x 2 x half> %0,
+    <vscale x 2 x half> %1,
+    i32 9,
+    i32 %2)
+
+  ret <vscale x 2 x half> %a
+}
+
+define <vscale x 2 x half> @intrinsic_vslideup_mask_vi_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv2f16_nxv2f16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 2 x half> @llvm.riscv.vslideup.mask.nxv2f16(
+    <vscale x 2 x half> %0,
+    <vscale x 2 x half> %1,
+    i32 9,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vslideup.nxv4f16(
+  <vscale x 4 x half>,
+  <vscale x 4 x half>,
+  i32,
+  i32);
+
+define <vscale x 4 x half> @intrinsic_vslideup_vx_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i32 %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv4f16_nxv4f16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 4 x half> @llvm.riscv.vslideup.nxv4f16(
+    <vscale x 4 x half> %0,
+    <vscale x 4 x half> %1,
+    i32 %2,
+    i32 %3)
+
+  ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vslideup.mask.nxv4f16(
+  <vscale x 4 x half>,
+  <vscale x 4 x half>,
+  i32,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x half> @intrinsic_vslideup_mask_vx_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4f16_nxv4f16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 4 x half> @llvm.riscv.vslideup.mask.nxv4f16(
+    <vscale x 4 x half> %0,
+    <vscale x 4 x half> %1,
+    i32 %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x half> %a
+}
+
+define <vscale x 4 x half> @intrinsic_vslideup_vi_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv4f16_nxv4f16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 4 x half> @llvm.riscv.vslideup.nxv4f16(
+    <vscale x 4 x half> %0,
+    <vscale x 4 x half> %1,
+    i32 9,
+    i32 %2)
+
+  ret <vscale x 4 x half> %a
+}
+
+define <vscale x 4 x half> @intrinsic_vslideup_mask_vi_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv4f16_nxv4f16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 4 x half> @llvm.riscv.vslideup.mask.nxv4f16(
+    <vscale x 4 x half> %0,
+    <vscale x 4 x half> %1,
+    i32 9,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vslideup.nxv8f16(
+  <vscale x 8 x half>,
+  <vscale x 8 x half>,
+  i32,
+  i32);
+
+define <vscale x 8 x half> @intrinsic_vslideup_vx_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i32 %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv8f16_nxv8f16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 8 x half> @llvm.riscv.vslideup.nxv8f16(
+    <vscale x 8 x half> %0,
+    <vscale x 8 x half> %1,
+    i32 %2,
+    i32 %3)
+
+  ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vslideup.mask.nxv8f16(
+  <vscale x 8 x half>,
+  <vscale x 8 x half>,
+  i32,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x half> @intrinsic_vslideup_mask_vx_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv8f16_nxv8f16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 8 x half> @llvm.riscv.vslideup.mask.nxv8f16(
+    <vscale x 8 x half> %0,
+    <vscale x 8 x half> %1,
+    i32 %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x half> %a
+}
+
+define <vscale x 8 x half> @intrinsic_vslideup_vi_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv8f16_nxv8f16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 8 x half> @llvm.riscv.vslideup.nxv8f16(
+    <vscale x 8 x half> %0,
+    <vscale x 8 x half> %1,
+    i32 9,
+    i32 %2)
+
+  ret <vscale x 8 x half> %a
+}
+
+define <vscale x 8 x half> @intrinsic_vslideup_mask_vi_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv8f16_nxv8f16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 8 x half> @llvm.riscv.vslideup.mask.nxv8f16(
+    <vscale x 8 x half> %0,
+    <vscale x 8 x half> %1,
+    i32 9,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vslideup.nxv16f16(
+  <vscale x 16 x half>,
+  <vscale x 16 x half>,
+  i32,
+  i32);
+
+define <vscale x 16 x half> @intrinsic_vslideup_vx_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i32 %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv16f16_nxv16f16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 16 x half> @llvm.riscv.vslideup.nxv16f16(
+    <vscale x 16 x half> %0,
+    <vscale x 16 x half> %1,
+    i32 %2,
+    i32 %3)
+
+  ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vslideup.mask.nxv16f16(
+  <vscale x 16 x half>,
+  <vscale x 16 x half>,
+  i32,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x half> @intrinsic_vslideup_mask_vx_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv16f16_nxv16f16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 16 x half> @llvm.riscv.vslideup.mask.nxv16f16(
+    <vscale x 16 x half> %0,
+    <vscale x 16 x half> %1,
+    i32 %2,
+    <vscale x 16 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 16 x half> %a
+}
+
+define <vscale x 16 x half> @intrinsic_vslideup_vi_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv16f16_nxv16f16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 16 x half> @llvm.riscv.vslideup.nxv16f16(
+    <vscale x 16 x half> %0,
+    <vscale x 16 x half> %1,
+    i32 9,
+    i32 %2)
+
+  ret <vscale x 16 x half> %a
+}
+
+define <vscale x 16 x half> @intrinsic_vslideup_mask_vi_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv16f16_nxv16f16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 16 x half> @llvm.riscv.vslideup.mask.nxv16f16(
+    <vscale x 16 x half> %0,
+    <vscale x 16 x half> %1,
+    i32 9,
+    <vscale x 16 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vslideup.nxv1f32(
+  <vscale x 1 x float>,
+  <vscale x 1 x float>,
+  i32,
+  i32);
+
+define <vscale x 1 x float> @intrinsic_vslideup_vx_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i32 %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv1f32_nxv1f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 1 x float> @llvm.riscv.vslideup.nxv1f32(
+    <vscale x 1 x float> %0,
+    <vscale x 1 x float> %1,
+    i32 %2,
+    i32 %3)
+
+  ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vslideup.mask.nxv1f32(
+  <vscale x 1 x float>,
+  <vscale x 1 x float>,
+  i32,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x float> @intrinsic_vslideup_mask_vx_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1f32_nxv1f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 1 x float> @llvm.riscv.vslideup.mask.nxv1f32(
+    <vscale x 1 x float> %0,
+    <vscale x 1 x float> %1,
+    i32 %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x float> %a
+}
+
+define <vscale x 1 x float> @intrinsic_vslideup_vi_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv1f32_nxv1f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 1 x float> @llvm.riscv.vslideup.nxv1f32(
+    <vscale x 1 x float> %0,
+    <vscale x 1 x float> %1,
+    i32 9,
+    i32 %2)
+
+  ret <vscale x 1 x float> %a
+}
+
+define <vscale x 1 x float> @intrinsic_vslideup_mask_vi_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv1f32_nxv1f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 1 x float> @llvm.riscv.vslideup.mask.nxv1f32(
+    <vscale x 1 x float> %0,
+    <vscale x 1 x float> %1,
+    i32 9,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vslideup.nxv2f32(
+  <vscale x 2 x float>,
+  <vscale x 2 x float>,
+  i32,
+  i32);
+
+define <vscale x 2 x float> @intrinsic_vslideup_vx_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i32 %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv2f32_nxv2f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 2 x float> @llvm.riscv.vslideup.nxv2f32(
+    <vscale x 2 x float> %0,
+    <vscale x 2 x float> %1,
+    i32 %2,
+    i32 %3)
+
+  ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vslideup.mask.nxv2f32(
+  <vscale x 2 x float>,
+  <vscale x 2 x float>,
+  i32,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x float> @intrinsic_vslideup_mask_vx_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2f32_nxv2f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 2 x float> @llvm.riscv.vslideup.mask.nxv2f32(
+    <vscale x 2 x float> %0,
+    <vscale x 2 x float> %1,
+    i32 %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x float> %a
+}
+
+define <vscale x 2 x float> @intrinsic_vslideup_vi_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv2f32_nxv2f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 2 x float> @llvm.riscv.vslideup.nxv2f32(
+    <vscale x 2 x float> %0,
+    <vscale x 2 x float> %1,
+    i32 9,
+    i32 %2)
+
+  ret <vscale x 2 x float> %a
+}
+
+define <vscale x 2 x float> @intrinsic_vslideup_mask_vi_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv2f32_nxv2f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 2 x float> @llvm.riscv.vslideup.mask.nxv2f32(
+    <vscale x 2 x float> %0,
+    <vscale x 2 x float> %1,
+    i32 9,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vslideup.nxv4f32(
+  <vscale x 4 x float>,
+  <vscale x 4 x float>,
+  i32,
+  i32);
+
+define <vscale x 4 x float> @intrinsic_vslideup_vx_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i32 %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv4f32_nxv4f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 4 x float> @llvm.riscv.vslideup.nxv4f32(
+    <vscale x 4 x float> %0,
+    <vscale x 4 x float> %1,
+    i32 %2,
+    i32 %3)
+
+  ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vslideup.mask.nxv4f32(
+  <vscale x 4 x float>,
+  <vscale x 4 x float>,
+  i32,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x float> @intrinsic_vslideup_mask_vx_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4f32_nxv4f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 4 x float> @llvm.riscv.vslideup.mask.nxv4f32(
+    <vscale x 4 x float> %0,
+    <vscale x 4 x float> %1,
+    i32 %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x float> %a
+}
+
+define <vscale x 4 x float> @intrinsic_vslideup_vi_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv4f32_nxv4f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 4 x float> @llvm.riscv.vslideup.nxv4f32(
+    <vscale x 4 x float> %0,
+    <vscale x 4 x float> %1,
+    i32 9,
+    i32 %2)
+
+  ret <vscale x 4 x float> %a
+}
+
+define <vscale x 4 x float> @intrinsic_vslideup_mask_vi_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv4f32_nxv4f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 4 x float> @llvm.riscv.vslideup.mask.nxv4f32(
+    <vscale x 4 x float> %0,
+    <vscale x 4 x float> %1,
+    i32 9,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vslideup.nxv8f32(
+  <vscale x 8 x float>,
+  <vscale x 8 x float>,
+  i32,
+  i32);
+
+define <vscale x 8 x float> @intrinsic_vslideup_vx_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i32 %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv8f32_nxv8f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 8 x float> @llvm.riscv.vslideup.nxv8f32(
+    <vscale x 8 x float> %0,
+    <vscale x 8 x float> %1,
+    i32 %2,
+    i32 %3)
+
+  ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vslideup.mask.nxv8f32(
+  <vscale x 8 x float>,
+  <vscale x 8 x float>,
+  i32,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x float> @intrinsic_vslideup_mask_vx_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv8f32_nxv8f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 8 x float> @llvm.riscv.vslideup.mask.nxv8f32(
+    <vscale x 8 x float> %0,
+    <vscale x 8 x float> %1,
+    i32 %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x float> %a
+}
+
+define <vscale x 8 x float> @intrinsic_vslideup_vi_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv8f32_nxv8f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 8 x float> @llvm.riscv.vslideup.nxv8f32(
+    <vscale x 8 x float> %0,
+    <vscale x 8 x float> %1,
+    i32 9,
+    i32 %2)
+
+  ret <vscale x 8 x float> %a
+}
+
+define <vscale x 8 x float> @intrinsic_vslideup_mask_vi_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv8f32_nxv8f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 8 x float> @llvm.riscv.vslideup.mask.nxv8f32(
+    <vscale x 8 x float> %0,
+    <vscale x 8 x float> %1,
+    i32 9,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 8 x float> %a
+}

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vslideup-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vslideup-rv64.ll
new file mode 100644
index 000000000000..74b652f41be9
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vslideup-rv64.ll
@@ -0,0 +1,2131 @@
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
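+; Same four-test pattern per element type as the rv32 file (vx and vi forms,
+; each unmasked and masked), except that on riscv64 the offset and vl
+; operands are i64 rather than i32.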
+declare <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  i64,
+  i64);
+
+define <vscale x 1 x i8> @intrinsic_vslideup_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv1i8_nxv1i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    i64 %2,
+    i64 %3)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vslideup.mask.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  i64,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i8> @intrinsic_vslideup_mask_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1i8_nxv1i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 1 x i8> @llvm.riscv.vslideup.mask.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    i64 %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 1 x i8> @intrinsic_vslideup_vi_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv1i8_nxv1i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    i64 9,
+    i64 %2)
+
+  ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 1 x i8> @intrinsic_vslideup_mask_vi_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv1i8_nxv1i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 1 x i8> @llvm.riscv.vslideup.mask.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    i64 9,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vslideup.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  i64,
+  i64);
+
+define <vscale x 2 x i8> @intrinsic_vslideup_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv2i8_nxv2i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 2 x i8> @llvm.riscv.vslideup.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    i64 %2,
+    i64 %3)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vslideup.mask.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  i64,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i8> @intrinsic_vslideup_mask_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2i8_nxv2i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 2 x i8> @llvm.riscv.vslideup.mask.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    i64 %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_vslideup_vi_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv2i8_nxv2i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 2 x i8> @llvm.riscv.vslideup.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    i64 9,
+    i64 %2)
+
+  ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_vslideup_mask_vi_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv2i8_nxv2i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 2 x i8> @llvm.riscv.vslideup.mask.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    i64 9,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vslideup.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  i64,
+  i64);
+
+define <vscale x 4 x i8> @intrinsic_vslideup_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv4i8_nxv4i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 4 x i8> @llvm.riscv.vslideup.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    i64 %2,
+    i64 %3)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vslideup.mask.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  i64,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i8> @intrinsic_vslideup_mask_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4i8_nxv4i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 4 x i8> @llvm.riscv.vslideup.mask.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    i64 %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_vslideup_vi_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv4i8_nxv4i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 4 x i8> @llvm.riscv.vslideup.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    i64 9,
+    i64 %2)
+
+  ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_vslideup_mask_vi_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv4i8_nxv4i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 4 x i8> @llvm.riscv.vslideup.mask.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    i64 9,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vslideup.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  i64,
+  i64);
+
+define <vscale x 8 x i8> @intrinsic_vslideup_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv8i8_nxv8i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 8 x i8> @llvm.riscv.vslideup.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    i64 %2,
+    i64 %3)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vslideup.mask.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  i64,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i8> @intrinsic_vslideup_mask_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv8i8_nxv8i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 8 x i8> @llvm.riscv.vslideup.mask.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    i64 %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vslideup_vi_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv8i8_nxv8i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 8 x i8> @llvm.riscv.vslideup.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    i64 9,
+    i64 %2)
+
+  ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vslideup_mask_vi_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv8i8_nxv8i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 8 x i8> @llvm.riscv.vslideup.mask.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    i64 9,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vslideup.nxv16i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  i64,
+  i64);
+
+define <vscale x 16 x i8> @intrinsic_vslideup_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv16i8_nxv16i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 16 x i8> @llvm.riscv.vslideup.nxv16i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    i64 %2,
+    i64 %3)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vslideup.mask.nxv16i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  i64,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i8> @intrinsic_vslideup_mask_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv16i8_nxv16i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 16 x i8> @llvm.riscv.vslideup.mask.nxv16i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    i64 %2,
+    <vscale x 16 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_vslideup_vi_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv16i8_nxv16i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 16 x i8> @llvm.riscv.vslideup.nxv16i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    i64 9,
+    i64 %2)
+
+  ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_vslideup_mask_vi_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv16i8_nxv16i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 16 x i8> @llvm.riscv.vslideup.mask.nxv16i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    i64 9,
+    <vscale x 16 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vslideup.nxv32i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  i64,
+  i64);
+
+define <vscale x 32 x i8> @intrinsic_vslideup_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv32i8_nxv32i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 32 x i8> @llvm.riscv.vslideup.nxv32i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8> %1,
+    i64 %2,
+    i64 %3)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vslideup.mask.nxv32i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  i64,
+  <vscale x 32 x i1>,
+  i64);
+
+define <vscale x 32 x i8> @intrinsic_vslideup_mask_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv32i8_nxv32i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 32 x i8> @llvm.riscv.vslideup.mask.nxv32i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8> %1,
+    i64 %2,
+    <vscale x 32 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 32 x i8> %a
+}
+
+define <vscale x 32 x i8> @intrinsic_vslideup_vi_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv32i8_nxv32i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 32 x i8> @llvm.riscv.vslideup.nxv32i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8> %1,
+    i64 9,
+    i64 %2)
+
+  ret <vscale x 32 x i8> %a
+}
+
+define <vscale x 32 x i8> @intrinsic_vslideup_mask_vi_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv32i8_nxv32i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 32 x i8> @llvm.riscv.vslideup.mask.nxv32i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8> %1,
+    i64 9,
+    <vscale x 32 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vslideup.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  i64,
+  i64);
+
+define <vscale x 1 x i16> @intrinsic_vslideup_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv1i16_nxv1i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 1 x i16> @llvm.riscv.vslideup.nxv1i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    i64 %2,
+    i64 %3)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vslideup.mask.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  i64,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i16> @intrinsic_vslideup_mask_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1i16_nxv1i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 1 x i16> @llvm.riscv.vslideup.mask.nxv1i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    i64 %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 1 x i16> @intrinsic_vslideup_vi_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv1i16_nxv1i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 1 x i16> @llvm.riscv.vslideup.nxv1i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    i64 9,
+    i64 %2)
+
+  ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 1 x i16> @intrinsic_vslideup_mask_vi_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv1i16_nxv1i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 1 x i16> @llvm.riscv.vslideup.mask.nxv1i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    i64 9,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vslideup.nxv2i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  i64,
+  i64);
+
+define <vscale x 2 x i16> @intrinsic_vslideup_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv2i16_nxv2i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 2 x i16> @llvm.riscv.vslideup.nxv2i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %1,
+    i64 %2,
+    i64 %3)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vslideup.mask.nxv2i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  i64,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i16> @intrinsic_vslideup_mask_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2i16_nxv2i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 2 x i16> @llvm.riscv.vslideup.mask.nxv2i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %1,
+    i64 %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 2 x i16> @intrinsic_vslideup_vi_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv2i16_nxv2i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 2 x i16> @llvm.riscv.vslideup.nxv2i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %1,
+    i64 9,
+    i64 %2)
+
+  ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 2 x i16> @intrinsic_vslideup_mask_vi_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv2i16_nxv2i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 2 x i16> @llvm.riscv.vslideup.mask.nxv2i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %1,
+    i64 9,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vslideup.nxv4i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  i64,
+  i64);
+
+define <vscale x 4 x i16> @intrinsic_vslideup_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv4i16_nxv4i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 4 x i16> @llvm.riscv.vslideup.nxv4i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %1,
+    i64 %2,
+    i64 %3)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vslideup.mask.nxv4i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  i64,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i16> @intrinsic_vslideup_mask_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4i16_nxv4i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 4 x i16> @llvm.riscv.vslideup.mask.nxv4i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %1,
+    i64 %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 4 x i16> @intrinsic_vslideup_vi_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv4i16_nxv4i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 4 x i16> @llvm.riscv.vslideup.nxv4i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %1,
+    i64 9,
+    i64 %2)
+
+  ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 4 x i16> @intrinsic_vslideup_mask_vi_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv4i16_nxv4i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 4 x i16> @llvm.riscv.vslideup.mask.nxv4i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %1,
+    i64 9,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vslideup.nxv8i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  i64,
+  i64);
+
+define <vscale x 8 x i16> @intrinsic_vslideup_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv8i16_nxv8i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 8 x i16> @llvm.riscv.vslideup.nxv8i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %1,
+    i64 %2,
+    i64 %3)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vslideup.mask.nxv8i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  i64,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i16> @intrinsic_vslideup_mask_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv8i16_nxv8i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 8 x i16> @llvm.riscv.vslideup.mask.nxv8i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %1,
+    i64 %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 8 x i16> @intrinsic_vslideup_vi_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv8i16_nxv8i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 8 x i16> @llvm.riscv.vslideup.nxv8i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %1,
+    i64 9,
+    i64 %2)
+
+  ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 8 x i16> @intrinsic_vslideup_mask_vi_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv8i16_nxv8i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 8 x i16> @llvm.riscv.vslideup.mask.nxv8i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %1,
+    i64 9,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vslideup.nxv16i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  i64,
+  i64);
+
+define <vscale x 16 x i16> @intrinsic_vslideup_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv16i16_nxv16i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 16 x i16> @llvm.riscv.vslideup.nxv16i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %1,
+    i64 %2,
+    i64 %3)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vslideup.mask.nxv16i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  i64,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i16> @intrinsic_vslideup_mask_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv16i16_nxv16i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 16 x i16> @llvm.riscv.vslideup.mask.nxv16i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %1,
+    i64 %2,
+    <vscale x 16 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 16 x i16> @intrinsic_vslideup_vi_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv16i16_nxv16i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 16 x i16> @llvm.riscv.vslideup.nxv16i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %1,
+    i64 9,
+    i64 %2)
+
+  ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 16 x i16> @intrinsic_vslideup_mask_vi_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv16i16_nxv16i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 16 x i16> @llvm.riscv.vslideup.mask.nxv16i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %1,
+    i64 9,
+    <vscale x 16 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vslideup.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  i64,
+  i64);
+
+define <vscale x 1 x i32> @intrinsic_vslideup_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv1i32_nxv1i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 1 x i32> @llvm.riscv.vslideup.nxv1i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    i64 %2,
+    i64 %3)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vslideup.mask.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  i64,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i32> @intrinsic_vslideup_mask_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1i32_nxv1i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 1 x i32> @llvm.riscv.vslideup.mask.nxv1i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    i64 %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 1 x i32> @intrinsic_vslideup_vi_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv1i32_nxv1i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 1 x i32> @llvm.riscv.vslideup.nxv1i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    i64 9,
+    i64 %2)
+
+  ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 1 x i32> @intrinsic_vslideup_mask_vi_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv1i32_nxv1i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 1 x i32> @llvm.riscv.vslideup.mask.nxv1i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    i64 9,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vslideup.nxv2i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  i64,
+  i64);
+
+define <vscale x 2 x i32> @intrinsic_vslideup_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv2i32_nxv2i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 2 x i32> @llvm.riscv.vslideup.nxv2i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    i64 %2,
+    i64 %3)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vslideup.mask.nxv2i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  i64,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i32> @intrinsic_vslideup_mask_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2i32_nxv2i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 2 x i32> @llvm.riscv.vslideup.mask.nxv2i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    i64 %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 2 x i32> @intrinsic_vslideup_vi_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv2i32_nxv2i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 2 x i32> @llvm.riscv.vslideup.nxv2i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    i64 9,
+    i64 %2)
+
+  ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 2 x i32> @intrinsic_vslideup_mask_vi_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv2i32_nxv2i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 2 x i32> @llvm.riscv.vslideup.mask.nxv2i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    i64 9,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vslideup.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  i64,
+  i64);
+
+define <vscale x 4 x i32> @intrinsic_vslideup_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv4i32_nxv4i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 4 x i32> @llvm.riscv.vslideup.nxv4i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    i64 %2,
+    i64 %3)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vslideup.mask.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  i64,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i32> @intrinsic_vslideup_mask_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4i32_nxv4i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 4 x i32> @llvm.riscv.vslideup.mask.nxv4i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    i64 %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 4 x i32> @intrinsic_vslideup_vi_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv4i32_nxv4i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 4 x i32> @llvm.riscv.vslideup.nxv4i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    i64 9,
+    i64 %2)
+
+  ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 4 x i32> @intrinsic_vslideup_mask_vi_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv4i32_nxv4i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 4 x i32> @llvm.riscv.vslideup.mask.nxv4i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    i64 9,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vslideup.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  i64,
+  i64);
+
+define <vscale x 8 x i32> @intrinsic_vslideup_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv8i32_nxv8i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 8 x i32> @llvm.riscv.vslideup.nxv8i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    i64 %2,
+    i64 %3)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vslideup.mask.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  i64,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i32> @intrinsic_vslideup_mask_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv8i32_nxv8i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 8 x i32> @llvm.riscv.vslideup.mask.nxv8i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    i64 %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 8 x i32> @intrinsic_vslideup_vi_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv8i32_nxv8i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 8 x i32> @llvm.riscv.vslideup.nxv8i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    i64 9,
+    i64 %2)
+
+  ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 8 x i32> @intrinsic_vslideup_mask_vi_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv8i32_nxv8i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 8 x i32> @llvm.riscv.vslideup.mask.nxv8i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    i64 9,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vslideup.nxv1i64(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>,
+  i64,
+  i64);
+
+define <vscale x 1 x i64> @intrinsic_vslideup_vx_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv1i64_nxv1i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 1 x i64> @llvm.riscv.vslideup.nxv1i64(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64> %1,
+    i64 %2,
+    i64 %3)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vslideup.mask.nxv1i64(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>,
+  i64,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i64> @intrinsic_vslideup_mask_vx_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1i64_nxv1i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 1 x i64> @llvm.riscv.vslideup.mask.nxv1i64(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64> %1,
+    i64 %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i64> %a
+}
+
+define <vscale x 1 x i64> @intrinsic_vslideup_vi_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv1i64_nxv1i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 1 x i64> @llvm.riscv.vslideup.nxv1i64(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64> %1,
+    i64 9,
+    i64 %2)
+
+  ret <vscale x 1 x i64> %a
+}
+
+define <vscale x 1 x i64> @intrinsic_vslideup_mask_vi_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv1i64_nxv1i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 1 x i64> @llvm.riscv.vslideup.mask.nxv1i64(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64> %1,
+    i64 9,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vslideup.nxv2i64(
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>,
+  i64,
+  i64);
+
+define <vscale x 2 x i64> @intrinsic_vslideup_vx_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv2i64_nxv2i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 2 x i64> @llvm.riscv.vslideup.nxv2i64(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64> %1,
+    i64 %2,
+    i64 %3)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vslideup.mask.nxv2i64(
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>,
+  i64,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i64> @intrinsic_vslideup_mask_vx_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2i64_nxv2i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 2 x i64> @llvm.riscv.vslideup.mask.nxv2i64(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64> %1,
+    i64 %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i64> %a
+}
+
+define <vscale x 2 x i64> @intrinsic_vslideup_vi_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv2i64_nxv2i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 2 x i64> @llvm.riscv.vslideup.nxv2i64(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64> %1,
+    i64 9,
+    i64 %2)
+
+  ret <vscale x 2 x i64> %a
+}
+
+define <vscale x 2 x i64> @intrinsic_vslideup_mask_vi_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv2i64_nxv2i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 2 x i64> @llvm.riscv.vslideup.mask.nxv2i64(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64> %1,
+    i64 9,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vslideup.nxv4i64(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  i64,
+  i64);
+
+define <vscale x 4 x i64> @intrinsic_vslideup_vx_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv4i64_nxv4i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 4 x i64> @llvm.riscv.vslideup.nxv4i64(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64> %1,
+    i64 %2,
+    i64 %3)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vslideup.mask.nxv4i64(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  i64,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i64> @intrinsic_vslideup_mask_vx_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4i64_nxv4i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 4 x i64> @llvm.riscv.vslideup.mask.nxv4i64(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64> %1,
+    i64 %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i64> %a
+}
+
+define <vscale x 4 x i64> @intrinsic_vslideup_vi_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv4i64_nxv4i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 4 x i64> @llvm.riscv.vslideup.nxv4i64(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64> %1,
+    i64 9,
+    i64 %2)
+
+  ret <vscale x 4 x i64> %a
+}
+
+define <vscale x 4 x i64> @intrinsic_vslideup_mask_vi_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv4i64_nxv4i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 4 x i64> @llvm.riscv.vslideup.mask.nxv4i64(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64> %1,
+    i64 9,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i64> %a
+}
+
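+; Slides move whole elements without inspecting their bits, so the
+; floating-point variants below lower to the same vslideup instructions as
+; the integer tests; only the SEW/LMUL pair in vsetvli changes per type.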
+declare <vscale x 1 x half> @llvm.riscv.vslideup.nxv1f16(
+  <vscale x 1 x half>,
+  <vscale x 1 x half>,
+  i64,
+  i64);
+
+define <vscale x 1 x half> @intrinsic_vslideup_vx_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv1f16_nxv1f16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 1 x half> @llvm.riscv.vslideup.nxv1f16(
+    <vscale x 1 x half> %0,
+    <vscale x 1 x half> %1,
+    i64 %2,
+    i64 %3)
+
+  ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vslideup.mask.nxv1f16(
+  <vscale x 1 x half>,
+  <vscale x 1 x half>,
+  i64,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x half> @intrinsic_vslideup_mask_vx_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1f16_nxv1f16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 1 x half> @llvm.riscv.vslideup.mask.nxv1f16(
+    <vscale x 1 x half> %0,
+    <vscale x 1 x half> %1,
+    i64 %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x half> %a
+}
+
+define <vscale x 1 x half> @intrinsic_vslideup_vi_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv1f16_nxv1f16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 1 x half> @llvm.riscv.vslideup.nxv1f16(
+    <vscale x 1 x half> %0,
+    <vscale x 1 x half> %1,
+    i64 9,
+    i64 %2)
+
+  ret <vscale x 1 x half> %a
+}
+
+define <vscale x 1 x half> @intrinsic_vslideup_mask_vi_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv1f16_nxv1f16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 1 x half> @llvm.riscv.vslideup.mask.nxv1f16(
+    <vscale x 1 x half> %0,
+    <vscale x 1 x half> %1,
+    i64 9,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vslideup.nxv2f16(
+  <vscale x 2 x half>,
+  <vscale x 2 x half>,
+  i64,
+  i64);
+
+define <vscale x 2 x half> @intrinsic_vslideup_vx_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv2f16_nxv2f16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 2 x half> @llvm.riscv.vslideup.nxv2f16(
+    <vscale x 2 x half> %0,
+    <vscale x 2 x half> %1,
+    i64 %2,
+    i64 %3)
+
+  ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vslideup.mask.nxv2f16(
+  <vscale x 2 x half>,
+  <vscale x 2 x half>,
+  i64,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x half> @intrinsic_vslideup_mask_vx_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2f16_nxv2f16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 2 x half> @llvm.riscv.vslideup.mask.nxv2f16(
+    <vscale x 2 x half> %0,
+    <vscale x 2 x half> %1,
+    i64 %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x half> %a
+}
+
+define <vscale x 2 x half> @intrinsic_vslideup_vi_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv2f16_nxv2f16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 2 x half> @llvm.riscv.vslideup.nxv2f16(
+    <vscale x 2 x half> %0,
+    <vscale x 2 x half> %1,
+    i64 9,
+    i64 %2)
+
+  ret <vscale x 2 x half> %a
+}
+
+define <vscale x 2 x half> @intrinsic_vslideup_mask_vi_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv2f16_nxv2f16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 2 x half> @llvm.riscv.vslideup.mask.nxv2f16(
+    <vscale x 2 x half> %0,
+    <vscale x 2 x half> %1,
+    i64 9,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vslideup.nxv4f16(
+  <vscale x 4 x half>,
+  <vscale x 4 x half>,
+  i64,
+  i64);
+
+define <vscale x 4 x half> @intrinsic_vslideup_vx_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv4f16_nxv4f16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 4 x half> @llvm.riscv.vslideup.nxv4f16(
+    <vscale x 4 x half> %0,
+    <vscale x 4 x half> %1,
+    i64 %2,
+    i64 %3)
+
+  ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vslideup.mask.nxv4f16(
+  <vscale x 4 x half>,
+  <vscale x 4 x half>,
+  i64,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x half> @intrinsic_vslideup_mask_vx_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4f16_nxv4f16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 4 x half> @llvm.riscv.vslideup.mask.nxv4f16(
+    <vscale x 4 x half> %0,
+    <vscale x 4 x half> %1,
+    i64 %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x half> %a
+}
+
+define <vscale x 4 x half> @intrinsic_vslideup_vi_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv4f16_nxv4f16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 4 x half> @llvm.riscv.vslideup.nxv4f16(
+    <vscale x 4 x half> %0,
+    <vscale x 4 x half> %1,
+    i64 9,
+    i64 %2)
+
+  ret <vscale x 4 x half> %a
+}
+
+define <vscale x 4 x half> @intrinsic_vslideup_mask_vi_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv4f16_nxv4f16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 4 x half> @llvm.riscv.vslideup.mask.nxv4f16(
+    <vscale x 4 x half> %0,
+    <vscale x 4 x half> %1,
+    i64 9,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vslideup.nxv8f16(
+  <vscale x 8 x half>,
+  <vscale x 8 x half>,
+  i64,
+  i64);
+
+define <vscale x 8 x half> @intrinsic_vslideup_vx_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv8f16_nxv8f16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 8 x half> @llvm.riscv.vslideup.nxv8f16(
+    <vscale x 8 x half> %0,
+    <vscale x 8 x half> %1,
+    i64 %2,
+    i64 %3)
+
+  ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vslideup.mask.nxv8f16(
+  <vscale x 8 x half>,
+  <vscale x 8 x half>,
+  i64,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x half> @intrinsic_vslideup_mask_vx_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv8f16_nxv8f16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 8 x half> @llvm.riscv.vslideup.mask.nxv8f16(
+    <vscale x 8 x half> %0,
+    <vscale x 8 x half> %1,
+    i64 %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x half> %a
+}
+
+define <vscale x 8 x half> @intrinsic_vslideup_vi_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv8f16_nxv8f16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 8 x half> @llvm.riscv.vslideup.nxv8f16(
+    <vscale x 8 x half> %0,
+    <vscale x 8 x half> %1,
+    i64 9,
+    i64 %2)
+
+  ret <vscale x 8 x half> %a
+}
+
+define <vscale x 8 x half> @intrinsic_vslideup_mask_vi_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv8f16_nxv8f16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 8 x half> @llvm.riscv.vslideup.mask.nxv8f16(
+    <vscale x 8 x half> %0,
+    <vscale x 8 x half> %1,
+    i64 9,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vslideup.nxv16f16(
+  <vscale x 16 x half>,
+  <vscale x 16 x half>,
+  i64,
+  i64);
+
+define <vscale x 16 x half> @intrinsic_vslideup_vx_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv16f16_nxv16f16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 16 x half> @llvm.riscv.vslideup.nxv16f16(
+    <vscale x 16 x half> %0,
+    <vscale x 16 x half> %1,
+    i64 %2,
+    i64 %3)
+
+  ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vslideup.mask.nxv16f16(
+  <vscale x 16 x half>,
+  <vscale x 16 x half>,
+  i64,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x half> @intrinsic_vslideup_mask_vx_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv16f16_nxv16f16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 16 x half> @llvm.riscv.vslideup.mask.nxv16f16(
+    <vscale x 16 x half> %0,
+    <vscale x 16 x half> %1,
+    i64 %2,
+    <vscale x 16 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 16 x half> %a
+}
+
+define <vscale x 16 x half> @intrinsic_vslideup_vi_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv16f16_nxv16f16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 16 x half> @llvm.riscv.vslideup.nxv16f16(
+    <vscale x 16 x half> %0,
+    <vscale x 16 x half> %1,
+    i64 9,
+    i64 %2)
+
+  ret <vscale x 16 x half> %a
+}
+
+define <vscale x 16 x half> @intrinsic_vslideup_mask_vi_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv16f16_nxv16f16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 16 x half> @llvm.riscv.vslideup.mask.nxv16f16(
+    <vscale x 16 x half> %0,
+    <vscale x 16 x half> %1,
+    i64 9,
+    <vscale x 16 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vslideup.nxv1f32(
+  <vscale x 1 x float>,
+  <vscale x 1 x float>,
+  i64,
+  i64);
+
+define <vscale x 1 x float> @intrinsic_vslideup_vx_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv1f32_nxv1f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 1 x float> @llvm.riscv.vslideup.nxv1f32(
+    <vscale x 1 x float> %0,
+    <vscale x 1 x float> %1,
+    i64 %2,
+    i64 %3)
+
+  ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vslideup.mask.nxv1f32(
+  <vscale x 1 x float>,
+  <vscale x 1 x float>,
+  i64,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x float> @intrinsic_vslideup_mask_vx_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1f32_nxv1f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 1 x float> @llvm.riscv.vslideup.mask.nxv1f32(
+    <vscale x 1 x float> %0,
+    <vscale x 1 x float> %1,
+    i64 %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x float> %a
+}
+
+define <vscale x 1 x float> @intrinsic_vslideup_vi_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv1f32_nxv1f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 1 x float> @llvm.riscv.vslideup.nxv1f32(
+    <vscale x 1 x float> %0,
+    <vscale x 1 x float> %1,
+    i64 9,
+    i64 %2)
+
+  ret <vscale x 1 x float> %a
+}
+
+define <vscale x 1 x float> @intrinsic_vslideup_mask_vi_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv1f32_nxv1f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 1 x float> @llvm.riscv.vslideup.mask.nxv1f32(
+    <vscale x 1 x float> %0,
+    <vscale x 1 x float> %1,
+    i64 9,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vslideup.nxv2f32(
+  <vscale x 2 x float>,
+  <vscale x 2 x float>,
+  i64,
+  i64);
+
+define <vscale x 2 x float> @intrinsic_vslideup_vx_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv2f32_nxv2f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 2 x float> @llvm.riscv.vslideup.nxv2f32(
+    <vscale x 2 x float> %0,
+    <vscale x 2 x float> %1,
+    i64 %2,
+    i64 %3)
+
+  ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vslideup.mask.nxv2f32(
+  <vscale x 2 x float>,
+  <vscale x 2 x float>,
+  i64,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x float> @intrinsic_vslideup_mask_vx_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2f32_nxv2f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 2 x float> @llvm.riscv.vslideup.mask.nxv2f32(
+    <vscale x 2 x float> %0,
+    <vscale x 2 x float> %1,
+    i64 %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x float> %a
+}
+
+define <vscale x 2 x float> @intrinsic_vslideup_vi_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv2f32_nxv2f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 2 x float> @llvm.riscv.vslideup.nxv2f32(
+    <vscale x 2 x float> %0,
+    <vscale x 2 x float> %1,
+    i64 9,
+    i64 %2)
+
+  ret <vscale x 2 x float> %a
+}
+
+define <vscale x 2 x float> @intrinsic_vslideup_mask_vi_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv2f32_nxv2f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 2 x float> @llvm.riscv.vslideup.mask.nxv2f32(
+    <vscale x 2 x float> %0,
+    <vscale x 2 x float> %1,
+    i64 9,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vslideup.nxv4f32(
+  <vscale x 4 x float>,
+  <vscale x 4 x float>,
+  i64,
+  i64);
+
+define <vscale x 4 x float> @intrinsic_vslideup_vx_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv4f32_nxv4f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 4 x float> @llvm.riscv.vslideup.nxv4f32(
+    <vscale x 4 x float> %0,
+    <vscale x 4 x float> %1,
+    i64 %2,
+    i64 %3)
+
+  ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vslideup.mask.nxv4f32(
+  <vscale x 4 x float>,
+  <vscale x 4 x float>,
+  i64,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x float> @intrinsic_vslideup_mask_vx_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4f32_nxv4f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 4 x float> @llvm.riscv.vslideup.mask.nxv4f32(
+    <vscale x 4 x float> %0,
+    <vscale x 4 x float> %1,
+    i64 %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x float> %a
+}
+
+define <vscale x 4 x float> @intrinsic_vslideup_vi_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv4f32_nxv4f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 4 x float> @llvm.riscv.vslideup.nxv4f32(
+    <vscale x 4 x float> %0,
+    <vscale x 4 x float> %1,
+    i64 9,
+    i64 %2)
+
+  ret <vscale x 4 x float> %a
+}
+
+define <vscale x 4 x float> @intrinsic_vslideup_mask_vi_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv4f32_nxv4f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 4 x float> @llvm.riscv.vslideup.mask.nxv4f32(
+    <vscale x 4 x float> %0,
+    <vscale x 4 x float> %1,
+    i64 9,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vslideup.nxv8f32(
+  <vscale x 8 x float>,
+  <vscale x 8 x float>,
+  i64,
+  i64);
+
+define <vscale x 8 x float> @intrinsic_vslideup_vx_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv8f32_nxv8f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 8 x float> @llvm.riscv.vslideup.nxv8f32(
+    <vscale x 8 x float> %0,
+    <vscale x 8 x float> %1,
+    i64 %2,
+    i64 %3)
+
+  ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vslideup.mask.nxv8f32(
+  <vscale x 8 x float>,
+  <vscale x 8 x float>,
+  i64,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x float> @intrinsic_vslideup_mask_vx_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv8f32_nxv8f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 8 x float> @llvm.riscv.vslideup.mask.nxv8f32(
+    <vscale x 8 x float> %0,
+    <vscale x 8 x float> %1,
+    i64 %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x float> %a
+}
+
+define <vscale x 8 x float> @intrinsic_vslideup_vi_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv8f32_nxv8f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 8 x float> @llvm.riscv.vslideup.nxv8f32(
+    <vscale x 8 x float> %0,
+    <vscale x 8 x float> %1,
+    i64 9,
+    i64 %2)
+
+  ret <vscale x 8 x float> %a
+}
+
+define <vscale x 8 x float> @intrinsic_vslideup_mask_vi_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv8f32_nxv8f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 8 x float> @llvm.riscv.vslideup.mask.nxv8f32(
+    <vscale x 8 x float> %0,
+    <vscale x 8 x float> %1,
+    i64 9,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vslideup.nxv1f64(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  i64,
+  i64);
+
+define <vscale x 1 x double> @intrinsic_vslideup_vx_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv1f64_nxv1f64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 1 x double> @llvm.riscv.vslideup.nxv1f64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %1,
+    i64 %2,
+    i64 %3)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vslideup.mask.nxv1f64(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  i64,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x double> @intrinsic_vslideup_mask_vx_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1f64_nxv1f64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 1 x double> @llvm.riscv.vslideup.mask.nxv1f64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %1,
+    i64 %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x double> %a
+}
+
+define <vscale x 1 x double> @intrinsic_vslideup_vi_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv1f64_nxv1f64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 1 x double> @llvm.riscv.vslideup.nxv1f64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %1,
+    i64 9,
+    i64 %2)
+
+  ret <vscale x 1 x double> %a
+}
+
+define <vscale x 1 x double> @intrinsic_vslideup_mask_vi_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv1f64_nxv1f64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 1 x double> @llvm.riscv.vslideup.mask.nxv1f64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %1,
+    i64 9,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vslideup.nxv2f64(
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  i64,
+  i64);
+
+define <vscale x 2 x double> @intrinsic_vslideup_vx_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv2f64_nxv2f64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 2 x double> @llvm.riscv.vslideup.nxv2f64(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double> %1,
+    i64 %2,
+    i64 %3)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vslideup.mask.nxv2f64(
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  i64,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x double> @intrinsic_vslideup_mask_vx_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2f64_nxv2f64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 2 x double> @llvm.riscv.vslideup.mask.nxv2f64(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double> %1,
+    i64 %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x double> %a
+}
+
+define <vscale x 2 x double> @intrinsic_vslideup_vi_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv2f64_nxv2f64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 2 x double> @llvm.riscv.vslideup.nxv2f64(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double> %1,
+    i64 9,
+    i64 %2)
+
+  ret <vscale x 2 x double> %a
+}
+
+define <vscale x 2 x double> @intrinsic_vslideup_mask_vi_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv2f64_nxv2f64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 2 x double> @llvm.riscv.vslideup.mask.nxv2f64(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double> %1,
+    i64 9,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vslideup.nxv4f64(
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  i64,
+  i64);
+
+define <vscale x 4 x double> @intrinsic_vslideup_vx_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv4f64_nxv4f64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 4 x double> @llvm.riscv.vslideup.nxv4f64(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double> %1,
+    i64 %2,
+    i64 %3)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vslideup.mask.nxv4f64(
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  i64,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x double> @intrinsic_vslideup_mask_vx_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4f64_nxv4f64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 4 x double> @llvm.riscv.vslideup.mask.nxv4f64(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double> %1,
+    i64 %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x double> %a
+}
+
+define <vscale x 4 x double> @intrinsic_vslideup_vi_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv4f64_nxv4f64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 4 x double> @llvm.riscv.vslideup.nxv4f64(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double> %1,
+    i64 9,
+    i64 %2)
+
+  ret <vscale x 4 x double> %a
+}
+
+define <vscale x 4 x double> @intrinsic_vslideup_mask_vi_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv4f64_nxv4f64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 4 x double> @llvm.riscv.vslideup.mask.nxv4f64(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double> %1,
+    i64 9,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x double> %a
+}
