[llvm] 346c1f2 - [RISCV] Support vector crypto extension LLVM IR

via llvm-commits llvm-commits at lists.llvm.org
Wed Aug 2 10:25:52 PDT 2023


Author: 4vtomat
Date: 2023-08-02T10:25:36-07:00
New Revision: 346c1f26415a03d0308313f29ffc311a60158a69

URL: https://github.com/llvm/llvm-project/commit/346c1f26415a03d0308313f29ffc311a60158a69
DIFF: https://github.com/llvm/llvm-project/commit/346c1f26415a03d0308313f29ffc311a60158a69.diff

LOG: [RISCV] Support vector crypto extension LLVM IR

Depends on D141672

Differential Revision: https://reviews.llvm.org/D138809
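
For reference, the new Zvk intrinsics take (passthru, vector_in, vl, policy), i.e. the
usual unmasked RVV form plus a policy immediate, as exercised by the tests added below.
A minimal RV64 sketch (iXLen = i64; the function and value names are illustrative only),
which selects a single vaesef.vv when compiled with
llc -mtriple=riscv64 -mattr=+v,+experimental-zvkned:

  declare <vscale x 4 x i32> @llvm.riscv.vaesef.vv.nxv4i32(
    <vscale x 4 x i32>, <vscale x 4 x i32>, i64, i64)

  ; AES final-round encryption of the vector state with the given round key.
  ; Policy 2 matches the added tests and produces a "tu, ma" vsetvli.
  define <vscale x 4 x i32> @aes_final_round(<vscale x 4 x i32> %state,
                                             <vscale x 4 x i32> %round_key,
                                             i64 %vl) {
    %r = call <vscale x 4 x i32> @llvm.riscv.vaesef.vv.nxv4i32(
        <vscale x 4 x i32> %state, <vscale x 4 x i32> %round_key,
        i64 %vl, i64 2)
    ret <vscale x 4 x i32> %r
  }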

Added: 
    llvm/test/CodeGen/RISCV/rvv/vaesdf.ll
    llvm/test/CodeGen/RISCV/rvv/vaesdm.ll
    llvm/test/CodeGen/RISCV/rvv/vaesef.ll
    llvm/test/CodeGen/RISCV/rvv/vaesem.ll
    llvm/test/CodeGen/RISCV/rvv/vaeskf1.ll
    llvm/test/CodeGen/RISCV/rvv/vaeskf2.ll
    llvm/test/CodeGen/RISCV/rvv/vaesz.ll
    llvm/test/CodeGen/RISCV/rvv/vandn.ll
    llvm/test/CodeGen/RISCV/rvv/vbrev.ll
    llvm/test/CodeGen/RISCV/rvv/vbrev8.ll
    llvm/test/CodeGen/RISCV/rvv/vclmul.ll
    llvm/test/CodeGen/RISCV/rvv/vclmulh.ll
    llvm/test/CodeGen/RISCV/rvv/vclz.ll
    llvm/test/CodeGen/RISCV/rvv/vcpopv.ll
    llvm/test/CodeGen/RISCV/rvv/vctz.ll
    llvm/test/CodeGen/RISCV/rvv/vghsh.ll
    llvm/test/CodeGen/RISCV/rvv/vgmul.ll
    llvm/test/CodeGen/RISCV/rvv/vrev8.ll
    llvm/test/CodeGen/RISCV/rvv/vrol.ll
    llvm/test/CodeGen/RISCV/rvv/vror.ll
    llvm/test/CodeGen/RISCV/rvv/vsha2ch.ll
    llvm/test/CodeGen/RISCV/rvv/vsha2cl.ll
    llvm/test/CodeGen/RISCV/rvv/vsha2ms.ll
    llvm/test/CodeGen/RISCV/rvv/vsm3c.ll
    llvm/test/CodeGen/RISCV/rvv/vsm3me.ll
    llvm/test/CodeGen/RISCV/rvv/vsm4k.ll
    llvm/test/CodeGen/RISCV/rvv/vsm4r.ll
    llvm/test/CodeGen/RISCV/rvv/vwsll.ll

Modified: 
    llvm/include/llvm/IR/IntrinsicsRISCV.td
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp
    llvm/lib/Target/RISCV/RISCVInstrInfo.td
    llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
    llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td

Removed: 
    


################################################################################
diff  --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
index 4d7bfbd5669d12..3934d32f88a998 100644
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -296,6 +296,23 @@ let TargetPrefix = "riscv" in {
                     [IntrNoMem]>, RISCVVIntrinsic {
     let VLOperand = 2;
   }
+  // For destination vector type is the same as the source vector type
+  // Input: (passthru, vector_in, vl, policy)
+  class RISCVUnaryAAUnMaskedZvk<bit IsVS>
+        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
+                    [LLVMMatchType<0>, !if(IsVS, llvm_anyvector_ty, LLVMMatchType<0>),
+                     llvm_anyint_ty, !if(IsVS, LLVMMatchType<2>, LLVMMatchType<1>)],
+                    [ImmArg<ArgIndex<3>>, IntrNoMem]>, RISCVVIntrinsic {
+    let VLOperand = 2;
+  }
+
+  multiclass RISCVUnaryAAUnMaskedZvk<bit HasVV = 1, bit HasVS = 1> {
+    if HasVV then
+      def "int_riscv_" # NAME # "_vv" : RISCVUnaryAAUnMaskedZvk</*IsVS*/0>;
+
+    if HasVS then
+      def "int_riscv_" # NAME # "_vs" : RISCVUnaryAAUnMaskedZvk</*IsVS*/1>;
+  }
   // For destination vector type is the same as first source vector (with mask).
   // Input: (vector_in, vector_in, mask, vl, policy)
   class RISCVUnaryAAMasked
@@ -402,11 +419,25 @@ let TargetPrefix = "riscv" in {
   }
   // For destination vector type is the same as first source vector.
   // Input: (passthru, vector_in, vector_in/scalar_in, vl)
-  class RISCVBinaryAAXUnMasked
+  class RISCVBinaryAAXUnMasked<bit IsVI = 0>
         : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                     [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                      llvm_anyint_ty],
-                    [IntrNoMem]>, RISCVVIntrinsic {
+                    !listconcat([IntrNoMem],
+                                !if(IsVI, [ImmArg<ArgIndex<2>>], []))>,
+                    RISCVVIntrinsic {
+    let ScalarOperand = 2;
+    let VLOperand = 3;
+  }
+  // For destination vector type is the same as the source vector type.
+  // Input: (passthru, vector_in, vector_in/scalar_in, vl, policy)
+  class RISCVBinaryAAXUnMaskedZvk<bit IsVI = 0>
+        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
+                                [LLVMMatchType<0>, LLVMMatchType<0>,
+                                 llvm_any_ty, llvm_anyint_ty, LLVMMatchType<2>],
+                                !listconcat([ImmArg<ArgIndex<4>>, IntrNoMem],
+                                            !if(IsVI, [ImmArg<ArgIndex<2>>], []))>,
+                                RISCVVIntrinsic {
     let ScalarOperand = 2;
     let VLOperand = 3;
   }
@@ -1840,6 +1871,54 @@ def int_riscv_sm3p1      : ScalarCryptoGprIntrinsic32;
 } // TargetPrefix = "riscv"
 
 //===----------------------------------------------------------------------===//
+// Vector Cryptography
+//
+// These intrinsics will lower directly into the corresponding instructions
+// added by the vector cryptography extension, if the extension is present.
+let TargetPrefix = "riscv" in {
+  // Zvbb
+  defm vandn             : RISCVBinaryAAX;
+  defm vbrev             : RISCVUnaryAA;
+  defm vbrev8            : RISCVUnaryAA;
+  defm vrev8             : RISCVUnaryAA;
+  defm vclz              : RISCVUnaryAA;
+  defm vctz              : RISCVUnaryAA;
+  defm vcpopv            : RISCVUnaryAA;
+  defm vrol              : RISCVBinaryAAX;
+  defm vror              : RISCVBinaryAAX;
+  defm vwsll             : RISCVBinaryABX;
+
+  // Zvbc
+  defm vclmul            : RISCVBinaryAAX;
+  defm vclmulh           : RISCVBinaryAAX;
+
+  // Zvkg
+  def int_riscv_vghsh    : RISCVBinaryAAXUnMaskedZvk;
+  def int_riscv_vgmul_vv : RISCVUnaryAAUnMaskedZvk</*IsVS*/0>;
+
+  // Zvkned
+  defm vaesdf            : RISCVUnaryAAUnMaskedZvk;
+  defm vaesdm            : RISCVUnaryAAUnMaskedZvk;
+  defm vaesef            : RISCVUnaryAAUnMaskedZvk;
+  defm vaesem            : RISCVUnaryAAUnMaskedZvk;
+  def int_riscv_vaeskf1  : RISCVBinaryAAXUnMasked</*IsVI*/1>;
+  def int_riscv_vaeskf2  : RISCVBinaryAAXUnMaskedZvk</*IsVI*/1>;
+  defm vaesz             : RISCVUnaryAAUnMaskedZvk</*HasVV*/0>;
+
+  // Zvknha or Zvknhb
+  def int_riscv_vsha2ch  : RISCVBinaryAAXUnMaskedZvk;
+  def int_riscv_vsha2cl  : RISCVBinaryAAXUnMaskedZvk;
+  def int_riscv_vsha2ms  : RISCVBinaryAAXUnMaskedZvk;
+
+  // Zvksed
+  def int_riscv_vsm4k    : RISCVBinaryAAXUnMasked</*IsVI*/1>;
+  defm vsm4r             : RISCVUnaryAAUnMaskedZvk;
+
+  // Zvksh
+  def int_riscv_vsm3c    : RISCVBinaryAAXUnMaskedZvk</*IsVI*/1>;
+  def int_riscv_vsm3me   : RISCVBinaryAAXUnMasked;
+} // TargetPrefix = "riscv"
+
 // Vendor extensions
 //===----------------------------------------------------------------------===//
 include "llvm/IR/IntrinsicsRISCVXTHead.td"

diff  --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index dbabce7c80b297..8d17d9cc9d6c5d 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -7190,6 +7190,14 @@ static SDValue lowerGetVectorLength(SDNode *N, SelectionDAG &DAG,
   return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, XLenVT, ID, AVL, Sew, LMul);
 }
 
+// LMUL * VLEN should be greater than or equal to EGS * SEW
+static inline bool isValidEGW(int EGS, EVT VT,
+                              const RISCVSubtarget &Subtarget) {
+  return (Subtarget.getRealMinVLen() *
+             VT.getSizeInBits().getKnownMinValue()) / RISCV::RVVBitsPerBlock >=
+         EGS * VT.getScalarSizeInBits();
+}
+
 SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
                                                      SelectionDAG &DAG) const {
   unsigned IntNo = Op.getConstantOperandVal(0);
@@ -7310,6 +7318,48 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
     return DAG.getNode(RISCVISD::VSELECT_VL, DL, VT, SelectCond, SplattedVal,
                        Vec, VL);
   }
+  // EGS * EEW >= 128 bits
+  case Intrinsic::riscv_vaesdf_vv:
+  case Intrinsic::riscv_vaesdf_vs:
+  case Intrinsic::riscv_vaesdm_vv:
+  case Intrinsic::riscv_vaesdm_vs:
+  case Intrinsic::riscv_vaesef_vv:
+  case Intrinsic::riscv_vaesef_vs:
+  case Intrinsic::riscv_vaesem_vv:
+  case Intrinsic::riscv_vaesem_vs:
+  case Intrinsic::riscv_vaeskf1:
+  case Intrinsic::riscv_vaeskf2:
+  case Intrinsic::riscv_vaesz_vs:
+  case Intrinsic::riscv_vsm4k:
+  case Intrinsic::riscv_vsm4r_vv:
+  case Intrinsic::riscv_vsm4r_vs: {
+    if (!isValidEGW(4, Op.getSimpleValueType(), Subtarget) ||
+        !isValidEGW(4, Op->getOperand(1).getSimpleValueType(), Subtarget) ||
+        !isValidEGW(4, Op->getOperand(2).getSimpleValueType(), Subtarget))
+      report_fatal_error("EGW should be greater than or equal to 4 * SEW.");
+    return Op;
+  }
+  // EGS * EEW >= 256 bits
+  case Intrinsic::riscv_vsm3c:
+  case Intrinsic::riscv_vsm3me: {
+    if (!isValidEGW(8, Op.getSimpleValueType(), Subtarget) ||
+        !isValidEGW(8, Op->getOperand(1).getSimpleValueType(), Subtarget))
+      report_fatal_error("EGW should be greater than or equal to 8 * SEW.");
+    return Op;
+  }
+  // zvknha(SEW=32)/zvknhb(SEW=[32|64])
+  case Intrinsic::riscv_vsha2ch:
+  case Intrinsic::riscv_vsha2cl:
+  case Intrinsic::riscv_vsha2ms: {
+    if (Op->getSimpleValueType(0).getScalarSizeInBits() == 64 &&
+        !Subtarget.hasStdExtZvknhb())
+      report_fatal_error("SEW=64 needs Zvknhb to be enabled.");
+    if (!isValidEGW(4, Op.getSimpleValueType(), Subtarget) ||
+        !isValidEGW(4, Op->getOperand(1).getSimpleValueType(), Subtarget) ||
+        !isValidEGW(4, Op->getOperand(2).getSimpleValueType(), Subtarget))
+      report_fatal_error("EGW should be greater than or equal to 4 * SEW.");
+    return Op;
+  }
   }
 
   return lowerVectorIntrinsicScalars(Op, DAG, Subtarget);
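
For a concrete reading of the isValidEGW check above (assuming the default Zvl128b
implied by +v, i.e. a real minimum VLEN of 128):

  <vscale x 4 x i32> (LMUL=2):   2 * 128 = 256 >= 4 * SEW = 128  -> accepted
  <vscale x 1 x i32> (LMUL=1/2): 128 / 2 =  64 <  4 * SEW = 128  -> report_fatal_error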

diff  --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.td b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
index 382e5c7b3d9723..91a4db0ad887d1 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
@@ -237,7 +237,7 @@ def InsnDirectiveOpcode : AsmOperandClass {
   let PredicateMethod = "isImm";
 }
 
-def uimm6 : Operand<XLenVT> {
+def uimm6 : Operand<XLenVT>, ImmLeaf<XLenVT, [{return isUInt<6>(Imm);}]> {
   let ParserMatchClass = UImmAsmOperand<6>;
   let DecoderMethod = "decodeUImmOperand<6>";
   let OperandType = "OPERAND_UIMM6";

diff  --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index 5f7672fd63d8a5..4e2ce84972cb8b 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -192,7 +192,7 @@ defvar MxListFWRed = [V_MF4, V_MF2, V_M1, V_M2, V_M4, V_M8];
 // Use for zext/sext.vf2
 defvar MxListVF2 = [V_MF4, V_MF2, V_M1, V_M2, V_M4, V_M8];
 
-// Use for zext/sext.vf4
+// Use for zext/sext.vf4 and vector crypto instructions
 defvar MxListVF4 = [V_MF2, V_M1, V_M2, V_M4, V_M8];
 
 // Use for zext/sext.vf8
@@ -2015,6 +2015,19 @@ multiclass VPseudoBinary<VReg RetClass,
   }
 }
 
+multiclass VPseudoBinaryNoMask<VReg RetClass,
+                               VReg Op1Class,
+                               DAGOperand Op2Class,
+                               LMULInfo MInfo,
+                               string Constraint = "",
+                               int sew = 0> {
+  let VLMul = MInfo.value, SEW=sew in {
+    defvar suffix = !if(sew, "_" # MInfo.MX # "_E" # sew, "_" # MInfo.MX);
+    def suffix : VPseudoBinaryNoMaskTU<RetClass, Op1Class, Op2Class,
+                                       Constraint>;
+  }
+}
+
 multiclass VPseudoBinaryRoundingMode<VReg RetClass,
                                      VReg Op1Class,
                                      DAGOperand Op2Class,

diff  --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td
index d4f08e16ecbd9a..4df2cf9c684014 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td
@@ -57,6 +57,18 @@ def rnum_2_14 : Operand<XLenVT>, ImmLeaf<XLenVT,
   let OperandNamespace = "RISCVOp";
 }
 
+def tuimm5 : Operand<XLenVT>, TImmLeaf<XLenVT, [{return isUInt<5>(Imm);}]> {
+  let ParserMatchClass = UImmAsmOperand<5>;
+  let EncoderMethod = "getUImmOpValue";
+  let DecoderMethod = "decodeUImmOperand<5>";
+  let MCOperandPredicate = [{
+    int64_t UImm;
+    if (MCOp.evaluateAsConstantImm(UImm))
+      return isUInt<5>(UImm);
+    return MCOp.isBareSymbolRef();
+  }];
+}
+
 //===----------------------------------------------------------------------===//
 // Instruction class templates
 //===----------------------------------------------------------------------===//
@@ -193,28 +205,242 @@ let Predicates = [HasStdExtZvksh], RVVConstraint = NoConstraint in {
 // Pseudo instructions
 //===----------------------------------------------------------------------===//
 
-defm PseudoVANDN : VPseudoVALU_VV_VX;
+defvar I32IntegerVectors = !filter(vti, AllIntegerVectors, !eq(vti.SEW, 32));
+defvar I32I64IntegerVectors = !filter(vti, AllIntegerVectors,
+                                      !or(!eq(vti.SEW, 32), !eq(vti.SEW, 64)));
+
+class ZvkI32IntegerVectors<string vd_lmul> {
+  list<VTypeInfo> vs2_types = !cond(!eq(vd_lmul, "M8")  : !filter(vti, I32IntegerVectors, !le(vti.LMul.octuple, 32)),
+                                    !eq(vd_lmul, "M4")  : !filter(vti, I32IntegerVectors, !le(vti.LMul.octuple, 32)),
+                                    !eq(vd_lmul, "M2")  : !filter(vti, I32IntegerVectors, !le(vti.LMul.octuple, 16)),
+                                    !eq(vd_lmul, "M1")  : !filter(vti, I32IntegerVectors, !le(vti.LMul.octuple, 8)),
+                                    !eq(vd_lmul, "MF2")  : !filter(vti, I32IntegerVectors, !le(vti.LMul.octuple, 4)),
+                                    !eq(vd_lmul, "MF4")  : !filter(vti, I32IntegerVectors, !le(vti.LMul.octuple, 2)),
+                                    !eq(vd_lmul, "MF8")  : !filter(vti, I32IntegerVectors, !le(vti.LMul.octuple, 1)));
+}
+
+class ZvkMxSet<string vd_lmul> {
+  list<LMULInfo> vs2_lmuls = !cond(!eq(vd_lmul, "M8")  : [V_MF8, V_MF4, V_MF2, V_M1, V_M2, V_M4],
+                                   !eq(vd_lmul, "M4")  : [V_MF8, V_MF4, V_MF2, V_M1, V_M2, V_M4],
+                                   !eq(vd_lmul, "M2")  : [V_MF8, V_MF4, V_MF2, V_M1, V_M2],
+                                   !eq(vd_lmul, "M1")  : [V_MF8, V_MF4, V_MF2, V_M1],
+                                   !eq(vd_lmul, "MF2") : [V_MF8, V_MF4, V_MF2],
+                                   !eq(vd_lmul, "MF4") : [V_MF8, V_MF4],
+                                   !eq(vd_lmul, "MF8") : [V_MF8]);
+}
+
+class VPseudoUnaryNoMask_Zvk<DAGOperand RetClass, VReg OpClass, string Constraint = ""> :
+      Pseudo<(outs RetClass:$rd),
+        (ins RetClass:$merge, OpClass:$rs2, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
+        RISCVVPseudo {
+  let mayLoad = 0;
+  let mayStore = 0;
+  let hasSideEffects = 0;
+  let Constraints = !interleave([Constraint, "$rd = $merge"], ",");
+  let HasVLOp = 1;
+  let HasSEWOp = 1;
+  let HasVecPolicyOp = 1;
+  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
+class VPseudoBinaryNoMask_Zvk<VReg RetClass,
+                              VReg Op1Class,
+                              DAGOperand Op2Class,
+                              string Constraint> :
+        Pseudo<(outs RetClass:$rd),
+               (ins RetClass:$merge, Op1Class:$rs2, Op2Class:$rs1,
+                    AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
+        RISCVVPseudo {
+  let mayLoad = 0;
+  let mayStore = 0;
+  let hasSideEffects = 0;
+  let Constraints = !interleave([Constraint, "$rd = $merge"], ",");
+  let HasVLOp = 1;
+  let HasSEWOp = 1;
+  let HasVecPolicyOp = 1;
+  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
+multiclass VPseudoBinaryNoMask_Zvk<VReg RetClass,
+                                   VReg Op1Class,
+                                   DAGOperand Op2Class,
+                                   LMULInfo MInfo,
+                                   string Constraint = ""> {
+  let VLMul = MInfo.value in
+    def "_" # MInfo.MX : VPseudoBinaryNoMask_Zvk<RetClass, Op1Class, Op2Class,
+                                                 Constraint>;
+}
+
+multiclass VPseudoUnaryV_V_NoMask_Zvk<LMULInfo m, string Constraint = ""> {
+  let VLMul = m.value in {
+    def "_VV_" # m.MX : VPseudoUnaryNoMask_Zvk<m.vrclass, m.vrclass, Constraint>;
+  }
+}
+
+multiclass VPseudoUnaryV_S_NoMask_Zvk<LMULInfo m, string Constraint = ""> {
+  let VLMul = m.value in
+    foreach vs2_lmul = ZvkMxSet<m.MX>.vs2_lmuls in
+      def "_VS_" # m.MX # "_" # vs2_lmul.MX : VPseudoUnaryNoMask_Zvk<m.vrclass, vs2_lmul.vrclass, Constraint>;
+}
+
+multiclass VPseudoVALU_V_NoMask_Zvk<string Constraint = ""> {
+  foreach m = MxListVF4 in {
+    defvar mx = m.MX;
+    defvar WriteVIALUV_MX = !cast<SchedWrite>("WriteVIALUV_" # mx);
+    defvar ReadVIALUV_MX = !cast<SchedRead>("ReadVIALUV_" # mx);
+
+    defm "" : VPseudoUnaryV_V_NoMask_Zvk<m, Constraint>,
+              Sched<[WriteVIALUV_MX, ReadVIALUV_MX, ReadVIALUV_MX, ReadVMask]>;
+  }
+}
+
+multiclass VPseudoVALU_S_NoMask_Zvk<string Constraint = ""> {
+  foreach m = MxListVF4 in {
+    defvar mx = m.MX;
+    defvar WriteVIALUV_MX = !cast<SchedWrite>("WriteVIALUV_" # mx);
+    defvar ReadVIALUV_MX = !cast<SchedRead>("ReadVIALUV_" # mx);
+
+    defm "" : VPseudoUnaryV_S_NoMask_Zvk<m, Constraint>,
+              Sched<[WriteVIALUV_MX, ReadVIALUV_MX, ReadVIALUV_MX, ReadVMask]>;
+  }
+}
+
+multiclass VPseudoVALU_V_S_NoMask_Zvk<string Constraint = ""> {
+  defm "" : VPseudoVALU_V_NoMask_Zvk<Constraint>;
+  defm "" : VPseudoVALU_S_NoMask_Zvk<Constraint>;
+}
+
+multiclass VPseudoVALU_VV_NoMask_Zvk<string Constraint = ""> {
+  foreach m = MxListVF4 in {
+    defvar mx = m.MX;
+    defvar WriteVIALUV_MX = !cast<SchedWrite>("WriteVIALUV_" # mx);
+    defvar ReadVIALUV_MX = !cast<SchedRead>("ReadVIALUV_" # mx);
 
-multiclass VPseudoUnaryV_V {
+    defm _VV : VPseudoBinaryNoMask_Zvk<m.vrclass, m.vrclass, m.vrclass, m,
+                                       Constraint>,
+               Sched<[WriteVIALUV_MX, ReadVIALUV_MX, ReadVIALUV_MX, ReadVMask]>;
+  }
+}
+
+multiclass VPseudoVALU_VI_NoMask_Zvk<Operand ImmType = simm5, string Constraint = ""> {
+  foreach m = MxListVF4 in {
+    defvar mx = m.MX;
+    defvar WriteVIALUV_MX = !cast<SchedWrite>("WriteVIALUV_" # mx);
+    defvar ReadVIALUV_MX = !cast<SchedRead>("ReadVIALUV_" # mx);
+
+    defm _VI : VPseudoBinaryNoMask_Zvk<m.vrclass, m.vrclass, ImmType, m,
+                                       Constraint>,
+               Sched<[WriteVIALUV_MX, ReadVIALUV_MX, ReadVIALUV_MX, ReadVMask]>;
+  }
+}
+
+multiclass VPseudoVALU_VI_NoMaskTU_Zvk<Operand ImmType = uimm5, string Constraint = ""> {
+  foreach m = MxListVF4 in {
+    defvar mx = m.MX;
+    defvar WriteVIALUV_MX = !cast<SchedWrite>("WriteVIALUV_" # mx);
+    defvar ReadVIALUV_MX = !cast<SchedRead>("ReadVIALUV_" # mx);
+
+    defm _VI : VPseudoBinaryNoMask<m.vrclass, m.vrclass, ImmType, m,
+                                   Constraint>,
+               Sched<[WriteVIALUV_MX, ReadVIALUV_MX, ReadVIALUV_MX, ReadVMask]>;
+  }
+}
+
+multiclass VPseudoVALU_VV_NoMaskTU_Zvk<string Constraint = ""> {
+  foreach m = MxListVF4 in {
+    defvar mx = m.MX;
+    defvar WriteVIALUV_MX = !cast<SchedWrite>("WriteVIALUV_" # mx);
+    defvar ReadVIALUV_MX = !cast<SchedRead>("ReadVIALUV_" # mx);
+
+    defm _VV : VPseudoBinaryNoMask<m.vrclass, m.vrclass, m.vrclass, m,
+                                   Constraint>,
+               Sched<[WriteVIALUV_MX, ReadVIALUV_MX, ReadVIALUV_MX, ReadVMask]>;
+  }
+}
+
+multiclass VPseudoVCLMUL_VV_VX {
   foreach m = MxList in {
-    let VLMul = m.value in {
-      def "_V_" # m.MX : VPseudoUnaryNoMask<m.vrclass, m.vrclass>;
-      def "_V_" # m.MX # "_MASK" : VPseudoUnaryMask<m.vrclass, m.vrclass>,
-                                   RISCVMaskedPseudo<MaskIdx=2>;
-    }
+    defvar mx = m.MX;
+    defvar WriteVIALUV_MX = !cast<SchedWrite>("WriteVIALUV_" # mx);
+    defvar WriteVIALUX_MX = !cast<SchedWrite>("WriteVIALUX_" # mx);
+    defvar ReadVIALUV_MX = !cast<SchedRead>("ReadVIALUV_" # mx);
+    defvar ReadVIALUX_MX = !cast<SchedRead>("ReadVIALUX_" # mx);
+
+    defm "" : VPseudoBinaryV_VV<m>,
+              Sched<[WriteVIALUV_MX, ReadVIALUV_MX, ReadVIALUV_MX, ReadVMask]>;
+    defm "" : VPseudoBinaryV_VX<m>,
+              Sched<[WriteVIALUX_MX, ReadVIALUV_MX, ReadVIALUX_MX, ReadVMask]>;
   }
 }
 
-defm PseudoVBREV : VPseudoUnaryV_V;
-defm PseudoVREV8 : VPseudoUnaryV_V;
-defm PseudoVCLZ : VPseudoUnaryV_V;
-defm PseudoVCTZ : VPseudoUnaryV_V;
-defm PseudoVCPOP : VPseudoUnaryV_V;
+multiclass VPseudoUnaryV_V<LMULInfo m> {
+  let VLMul = m.value in {
+    defvar suffix = "_V_" # m.MX;
+    def suffix : VPseudoUnaryNoMask<m.vrclass, m.vrclass>;
+    def suffix # "_MASK" : VPseudoUnaryMask<m.vrclass, m.vrclass>,
+                                            RISCVMaskedPseudo</*MaskOpIdx*/ 2>;
+  }
+}
+
+multiclass VPseudoVALU_V {
+  foreach m = MxList in {
+    defvar mx = m.MX;
+    defvar WriteVIALUV_MX = !cast<SchedWrite>("WriteVIALUV_" # mx);
+    defvar ReadVIALUV_MX = !cast<SchedRead>("ReadVIALUV_" # mx);
+
+    defm "" : VPseudoUnaryV_V<m>,
+              Sched<[WriteVIALUV_MX, ReadVIALUV_MX, ReadVIALUV_MX, ReadVMask]>;
+  }
+}
 
-defm PseudoVROL : VPseudoVALU_VV_VX;
-defm PseudoVROR : VPseudoVALU_VV_VX_VI<uimm6>;
+let Predicates = [HasStdExtZvbb] in {
+  defm PseudoVANDN  : VPseudoVALU_VV_VX;
+  defm PseudoVBREV  : VPseudoVALU_V;
+  defm PseudoVBREV8 : VPseudoVALU_V;
+  defm PseudoVREV8  : VPseudoVALU_V;
+  defm PseudoVCLZ   : VPseudoVALU_V;
+  defm PseudoVCTZ   : VPseudoVALU_V;
+  defm PseudoVCPOP  : VPseudoVALU_V;
+  defm PseudoVROL   : VPseudoVALU_VV_VX;
+  defm PseudoVROR   : VPseudoVALU_VV_VX_VI<uimm6>;
+  defm PseudoVWSLL : VPseudoVWALU_VV_VX_VI<uimm5>;
+} // Predicates = [HasStdExtZvbb]
 
-defm PseudoVWSLL : VPseudoVWALU_VV_VX_VI<uimm5>;
+let Predicates = [HasStdExtZvbc] in {
+  defm PseudoVCLMUL  : VPseudoVCLMUL_VV_VX;
+  defm PseudoVCLMULH : VPseudoVCLMUL_VV_VX;
+} // Predicates = [HasStdExtZvbc]
+
+let Predicates = [HasStdExtZvkg] in {
+  defm PseudoVGHSH : VPseudoVALU_VV_NoMask_Zvk;
+  defm PseudoVGMUL : VPseudoVALU_V_NoMask_Zvk;
+} // Predicates = [HasStdExtZvkg]
+
+let Predicates = [HasStdExtZvkned] in {
+  defm PseudoVAESDF  : VPseudoVALU_V_S_NoMask_Zvk;
+  defm PseudoVAESDM  : VPseudoVALU_V_S_NoMask_Zvk;
+  defm PseudoVAESEF  : VPseudoVALU_V_S_NoMask_Zvk;
+  defm PseudoVAESEM  : VPseudoVALU_V_S_NoMask_Zvk;
+  defm PseudoVAESKF1 : VPseudoVALU_VI_NoMaskTU_Zvk;
+  defm PseudoVAESKF2 : VPseudoVALU_VI_NoMask_Zvk<uimm5>;
+  defm PseudoVAESZ   : VPseudoVALU_S_NoMask_Zvk;
+} // Predicates = [HasStdExtZvkned]
+
+let Predicates = [HasStdExtZvknha] in {
+  defm PseudoVSHA2CH : VPseudoVALU_VV_NoMask_Zvk;
+  defm PseudoVSHA2CL : VPseudoVALU_VV_NoMask_Zvk;
+  defm PseudoVSHA2MS : VPseudoVALU_VV_NoMask_Zvk;
+} // Predicates = [HasStdExtZvknha]
+
+let Predicates = [HasStdExtZvksed] in {
+  defm PseudoVSM4K : VPseudoVALU_VI_NoMaskTU_Zvk;
+  defm PseudoVSM4R : VPseudoVALU_V_S_NoMask_Zvk;
+} // Predicates = [HasStdExtZvksed]
+
+let Predicates = [HasStdExtZvksh] in {
+  defm PseudoVSM3C  : VPseudoVALU_VI_NoMask_Zvk<uimm5>;
+  defm PseudoVSM3ME : VPseudoVALU_VV_NoMaskTU_Zvk;
+} // Predicates = [HasStdExtZvksh]
 
 //===----------------------------------------------------------------------===//
 // SDNode patterns
@@ -399,3 +625,245 @@ defm : VPatUnaryVL_V<riscv_bswap_vl, "PseudoVREV8">;
 defm : VPatUnaryVL_V<riscv_ctlz_vl, "PseudoVCLZ">;
 defm : VPatUnaryVL_V<riscv_cttz_vl, "PseudoVCTZ">;
 defm : VPatUnaryVL_V<riscv_ctpop_vl, "PseudoVCPOP">;
+
+//===----------------------------------------------------------------------===//
+// Codegen patterns
+//===----------------------------------------------------------------------===//
+
+class VPatUnaryNoMask_Zvk<string intrinsic_name,
+                      string inst,
+                      string kind,
+                      ValueType result_type,
+                      ValueType op2_type,
+                      int sew,
+                      LMULInfo vlmul,
+                      VReg result_reg_class,
+                      VReg op2_reg_class> :
+  Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
+                   (result_type result_reg_class:$merge),
+                   (op2_type op2_reg_class:$rs2),
+                   VLOpFrag, (XLenVT timm:$policy))),
+                   (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
+                   (result_type result_reg_class:$merge),
+                   (op2_type op2_reg_class:$rs2),
+                   GPR:$vl, sew, (XLenVT timm:$policy))>;
+
+class VPatUnaryNoMask_VS_Zvk<string intrinsic_name,
+                      string inst,
+                      string kind,
+                      ValueType result_type,
+                      ValueType op2_type,
+                      int sew,
+                      LMULInfo vlmul,
+                      LMULInfo vs2_lmul,
+                      VReg result_reg_class,
+                      VReg op2_reg_class> :
+  Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
+                   (result_type result_reg_class:$merge),
+                   (op2_type op2_reg_class:$rs2),
+                   VLOpFrag, (XLenVT timm:$policy))),
+                   (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX#"_"#vs2_lmul.MX)
+                   (result_type result_reg_class:$merge),
+                   (op2_type op2_reg_class:$rs2),
+                   GPR:$vl, sew, (XLenVT timm:$policy))>;
+
+multiclass VPatUnaryV_V_NoMask_Zvk<string intrinsic, string instruction,
+                              list<VTypeInfo> vtilist> {
+  foreach vti = vtilist in
+    def : VPatUnaryNoMask_Zvk<intrinsic # "_vv", instruction, "VV",
+                          vti.Vector, vti.Vector, vti.Log2SEW,
+                          vti.LMul, vti.RegClass, vti.RegClass>;
+}
+
+multiclass VPatUnaryV_S_NoMaskVectorCrypto<string intrinsic, string instruction,
+                               list<VTypeInfo> vtilist> {
+  foreach vti = vtilist in
+    foreach vti_vs2 = ZvkI32IntegerVectors<vti.LMul.MX>.vs2_types in
+      def : VPatUnaryNoMask_VS_Zvk<intrinsic # "_vs", instruction, "VS",
+                            vti.Vector, vti_vs2.Vector, vti.Log2SEW,
+                            vti.LMul, vti_vs2.LMul, vti.RegClass, vti_vs2.RegClass>;
+}
+
+multiclass VPatUnaryV_V_S_NoMask_Zvk<string intrinsic, string instruction,
+                                list<VTypeInfo> vtilist> {
+  defm : VPatUnaryV_V_NoMask_Zvk<intrinsic, instruction, vtilist>;
+  defm : VPatUnaryV_S_NoMaskVectorCrypto<intrinsic, instruction, vtilist>;
+}
+
+multiclass VPatBinaryV_VV_NoMask<string intrinsic, string instruction,
+                          list<VTypeInfo> vtilist> {
+  foreach vti = vtilist in
+    def : VPatTernaryNoMaskWithPolicy<intrinsic, instruction, "VV",
+                                      vti.Vector, vti.Vector, vti.Vector,
+                                      vti.Log2SEW, vti.LMul, vti.RegClass,
+                                      vti.RegClass, vti.RegClass>;
+}
+
+multiclass VPatBinaryV_VI_NoMask<string intrinsic, string instruction,
+                          list<VTypeInfo> vtilist, Operand imm_type = tuimm5> {
+  foreach vti = vtilist in
+    def : VPatTernaryNoMaskWithPolicy<intrinsic, instruction, "VI",
+                                      vti.Vector, vti.Vector, XLenVT,
+                                      vti.Log2SEW, vti.LMul, vti.RegClass,
+                                      vti.RegClass, imm_type>;
+}
+
+multiclass VPatBinaryV_VI_NoMaskTU<string intrinsic, string instruction,
+                          list<VTypeInfo> vtilist, Operand imm_type = tuimm5> {
+  foreach vti = vtilist in
+    def : VPatBinaryNoMaskTU<intrinsic, instruction # "_VI_" # vti.LMul.MX,
+                             vti.Vector, vti.Vector, XLenVT, vti.Log2SEW,
+                             vti.RegClass, vti.RegClass, imm_type>;
+}
+
+multiclass VPatBinaryV_VV_NoMaskTU<string intrinsic, string instruction,
+                          list<VTypeInfo> vtilist> {
+  foreach vti = vtilist in
+    def : VPatBinaryNoMaskTU<intrinsic, instruction # "_VV_" # vti.LMul.MX,
+                             vti.Vector, vti.Vector, vti.Vector, vti.Log2SEW,
+                             vti.RegClass, vti.RegClass, vti.RegClass>;
+}
+
+multiclass VPatBinaryV_VX_VROTATE<string intrinsic, string instruction,
+                                  list<VTypeInfo> vtilist, bit isSEWAware = 0> {
+  foreach vti = vtilist in {
+    defvar kind = "V"#vti.ScalarSuffix;
+    let Predicates = GetVTypePredicates<vti>.Predicates in
+    defm : VPatBinary<intrinsic,
+                      !if(isSEWAware,
+                          instruction#"_"#kind#"_"#vti.LMul.MX#"_E"#vti.SEW,
+                          instruction#"_"#kind#"_"#vti.LMul.MX),
+                      vti.Vector, vti.Vector, XLenVT, vti.Mask,
+                      vti.Log2SEW, vti.RegClass,
+                      vti.RegClass, vti.ScalarRegClass>;
+  }
+}
+
+multiclass VPatBinaryV_VI_VROL<string intrinsic, string instruction,
+                               list<VTypeInfo> vtilist, bit isSEWAware = 0> {
+  foreach vti = vtilist in {
+    defvar Intr = !cast<Intrinsic>(intrinsic);
+    defvar Pseudo = !cast<Instruction>(
+        !if(isSEWAware, instruction#"_VI_"#vti.LMul.MX#"_E"#vti.SEW,
+                        instruction#"_VI_"#vti.LMul.MX));
+    let Predicates = GetVTypePredicates<vti>.Predicates in
+    def : Pat<(vti.Vector (Intr (vti.Vector vti.RegClass:$merge),
+                          (vti.Vector vti.RegClass:$rs2),
+                          (XLenVT uimm6:$rs1),
+                          VLOpFrag)),
+                          (Pseudo (vti.Vector vti.RegClass:$merge),
+                          (vti.Vector vti.RegClass:$rs2),
+                          (InvRot64Imm uimm6:$rs1),
+                          GPR:$vl, vti.Log2SEW, TU_MU)>;
+
+    defvar IntrMask = !cast<Intrinsic>(intrinsic#"_mask");
+    defvar PseudoMask = !cast<Instruction>(
+        !if(isSEWAware, instruction#"_VI_"#vti.LMul.MX#"_E"#vti.SEW#"_MASK",
+                        instruction#"_VI_"#vti.LMul.MX#"_MASK"));
+    let Predicates = GetVTypePredicates<vti>.Predicates in
+    def : Pat<(vti.Vector (IntrMask (vti.Vector vti.RegClass:$merge),
+                          (vti.Vector vti.RegClass:$rs2),
+                          (XLenVT uimm6:$rs1),
+                          (vti.Mask V0),
+                          VLOpFrag, (XLenVT timm:$policy))),
+                          (PseudoMask (vti.Vector vti.RegClass:$merge),
+                          (vti.Vector vti.RegClass:$rs2),
+                          (InvRot64Imm uimm6:$rs1),
+                          (vti.Mask V0),
+                          GPR:$vl, vti.Log2SEW, (XLenVT timm:$policy))>;
+    }
+}
+
+multiclass VPatBinaryV_VV_VX_VROL<string intrinsic, string instruction,
+				  string instruction2, list<VTypeInfo> vtilist>
+    : VPatBinaryV_VV<intrinsic, instruction, vtilist>,
+      VPatBinaryV_VX_VROTATE<intrinsic, instruction, vtilist>,
+      VPatBinaryV_VI_VROL<intrinsic, instruction2, vtilist>;
+
+multiclass VPatBinaryV_VV_VX_VI_VROR<string intrinsic, string instruction,
+                                     list<VTypeInfo> vtilist, Operand ImmType = uimm6>
+    : VPatBinaryV_VV<intrinsic, instruction, vtilist>,
+      VPatBinaryV_VX_VROTATE<intrinsic, instruction, vtilist>,
+      VPatBinaryV_VI<intrinsic, instruction, vtilist, ImmType>;
+
+multiclass VPatBinaryW_VI_VWSLL<string intrinsic, string instruction,
+                                list<VTypeInfoToWide> vtilist> {
+  foreach VtiToWti = vtilist in {
+    defvar Vti = VtiToWti.Vti;
+    defvar Wti = VtiToWti.Wti;
+    defm : VPatBinary<intrinsic, instruction # "_VI_" # Vti.LMul.MX,
+                      Wti.Vector, Vti.Vector, XLenVT, Vti.Mask,
+                      Vti.Log2SEW, Wti.RegClass,
+                      Vti.RegClass, uimm5>;
+  }
+}
+
+multiclass VPatBinaryW_VX_VWSLL<string intrinsic, string instruction,
+                                list<VTypeInfoToWide> vtilist> {
+  foreach VtiToWti = vtilist in {
+    defvar Vti = VtiToWti.Vti;
+    defvar Wti = VtiToWti.Wti;
+    defvar kind = "V"#Vti.ScalarSuffix;
+    let Predicates = !listconcat(GetVTypePredicates<Vti>.Predicates,
+                                 GetVTypePredicates<Wti>.Predicates) in
+    defm : VPatBinary<intrinsic, instruction#"_"#kind#"_"#Vti.LMul.MX,
+                      Wti.Vector, Vti.Vector, XLenVT, Vti.Mask,
+                      Vti.Log2SEW, Wti.RegClass,
+                      Vti.RegClass, Vti.ScalarRegClass>;
+  }
+}
+
+multiclass VPatBinaryW_VV_VX_VI_VWSLL<string intrinsic, string instruction,
+                                      list<VTypeInfoToWide> vtilist>
+    : VPatBinaryW_VV<intrinsic, instruction, vtilist>,
+      VPatBinaryW_VX_VWSLL<intrinsic, instruction, vtilist>,
+      VPatBinaryW_VI_VWSLL<intrinsic, instruction, vtilist>;
+
+let Predicates = [HasStdExtZvbb] in {
+  defm : VPatBinaryV_VV_VX<"int_riscv_vandn", "PseudoVANDN", AllIntegerVectors>;
+  defm : VPatUnaryV_V<"int_riscv_vbrev", "PseudoVBREV", AllIntegerVectors>;
+  defm : VPatUnaryV_V<"int_riscv_vbrev8", "PseudoVBREV8", AllIntegerVectors>;
+  defm : VPatUnaryV_V<"int_riscv_vrev8", "PseudoVREV8", AllIntegerVectors>;
+  defm : VPatUnaryV_V<"int_riscv_vclz", "PseudoVCLZ", AllIntegerVectors>;
+  defm : VPatUnaryV_V<"int_riscv_vctz", "PseudoVCTZ", AllIntegerVectors>;
+  defm : VPatUnaryV_V<"int_riscv_vcpopv", "PseudoVCPOP", AllIntegerVectors>;
+  defm : VPatBinaryV_VV_VX_VROL<"int_riscv_vrol", "PseudoVROL", "PseudoVROR", AllIntegerVectors>;
+  defm : VPatBinaryV_VV_VX_VI_VROR<"int_riscv_vror", "PseudoVROR", AllIntegerVectors>;
+  defm : VPatBinaryW_VV_VX_VI_VWSLL<"int_riscv_vwsll", "PseudoVWSLL", AllWidenableIntVectors>;
+} // Predicates = [HasStdExtZvbb]
+
+let Predicates = [HasStdExtZvbc] in {
+  defm : VPatBinaryV_VV_VX<"int_riscv_vclmul", "PseudoVCLMUL", I64IntegerVectors>;
+  defm : VPatBinaryV_VV_VX<"int_riscv_vclmulh", "PseudoVCLMULH", I64IntegerVectors>;
+} // Predicates = [HasStdExtZvbc]
+
+let Predicates = [HasStdExtZvkg] in {
+  defm : VPatBinaryV_VV_NoMask<"int_riscv_vghsh", "PseudoVGHSH", I32IntegerVectors>;
+  defm : VPatUnaryV_V_NoMask_Zvk<"int_riscv_vgmul", "PseudoVGMUL", I32IntegerVectors>;
+} // Predicates = [HasStdExtZvkg]
+
+let Predicates = [HasStdExtZvkned] in {
+  defm : VPatUnaryV_V_S_NoMask_Zvk<"int_riscv_vaesdf", "PseudoVAESDF", I32IntegerVectors>;
+  defm : VPatUnaryV_V_S_NoMask_Zvk<"int_riscv_vaesdm", "PseudoVAESDM", I32IntegerVectors>;
+  defm : VPatUnaryV_V_S_NoMask_Zvk<"int_riscv_vaesef", "PseudoVAESEF", I32IntegerVectors>;
+  defm : VPatUnaryV_V_S_NoMask_Zvk<"int_riscv_vaesem", "PseudoVAESEM", I32IntegerVectors>;
+  defm : VPatBinaryV_VI_NoMaskTU<"int_riscv_vaeskf1", "PseudoVAESKF1", I32IntegerVectors>;
+  defm : VPatBinaryV_VI_NoMask<"int_riscv_vaeskf2", "PseudoVAESKF2", I32IntegerVectors>;
+  defm : VPatUnaryV_S_NoMaskVectorCrypto<"int_riscv_vaesz", "PseudoVAESZ", I32IntegerVectors>;
+} // Predicates = [HasStdExtZvkned]
+
+let Predicates = [HasStdExtZvknha] in {
+  defm : VPatBinaryV_VV_NoMask<"int_riscv_vsha2ch", "PseudoVSHA2CH", I32I64IntegerVectors>;
+  defm : VPatBinaryV_VV_NoMask<"int_riscv_vsha2cl", "PseudoVSHA2CL", I32I64IntegerVectors>;
+  defm : VPatBinaryV_VV_NoMask<"int_riscv_vsha2ms", "PseudoVSHA2MS", I32I64IntegerVectors>;
+} // Predicates = [HasStdExtZvknha]
+
+let Predicates = [HasStdExtZvksed] in {
+  defm : VPatBinaryV_VI_NoMaskTU<"int_riscv_vsm4k", "PseudoVSM4K", I32IntegerVectors>;
+  defm : VPatUnaryV_V_S_NoMask_Zvk<"int_riscv_vsm4r", "PseudoVSM4R", I32IntegerVectors>;
+} // Predicates = [HasStdExtZvksed]
+
+let Predicates = [HasStdExtZvksh] in {
+  defm : VPatBinaryV_VI_NoMask<"int_riscv_vsm3c", "PseudoVSM3C", I32IntegerVectors>;
+  defm : VPatBinaryV_VV_NoMaskTU<"int_riscv_vsm3me", "PseudoVSM3ME", I32IntegerVectors>;
+} // Predicates = [HasStdExtZvksh]

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vaesdf.ll b/llvm/test/CodeGen/RISCV/rvv/vaesdf.ll
new file mode 100644
index 00000000000000..b02c63b29cb8bc
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vaesdf.ll
@@ -0,0 +1,126 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvkned \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvkned \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+
+declare <vscale x 4 x i32> @llvm.riscv.vaesdf.vv.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  iXLen, iXLen);
+
+define <vscale x 4 x i32> @intrinsic_vaesdf_vv_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vaesdf_vv_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    vaesdf.vv v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vaesdf.vv.nxv4i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    iXLen %2, iXLen 2)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vaesdf.vv.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  iXLen, iXLen);
+
+define <vscale x 8 x i32> @intrinsic_vaesdf_vv_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vaesdf_vv_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    vaesdf.vv v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vaesdf.vv.nxv8i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    iXLen %2, iXLen 2)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vaesdf.vv.nxv16i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  iXLen, iXLen);
+
+define <vscale x 16 x i32> @intrinsic_vaesdf_vv_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vaesdf_vv_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT:    vaesdf.vv v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vaesdf.vv.nxv16i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    iXLen %2, iXLen 2)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vaesdf.vs.nxv4i32.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  iXLen, iXLen);
+
+define <vscale x 4 x i32> @intrinsic_vaesdf_vs_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vaesdf_vs_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    vaesdf.vs v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vaesdf.vs.nxv4i32.nxv4i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    iXLen %2, iXLen 2)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vaesdf.vs.nxv8i32.nxv4i32(
+  <vscale x 8 x i32>,
+  <vscale x 4 x i32>,
+  iXLen, iXLen);
+
+define <vscale x 8 x i32> @intrinsic_vaesdf_vs_nxv8i32(<vscale x 8 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vaesdf_vs_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    vaesdf.vs v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vaesdf.vs.nxv8i32.nxv4i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 4 x i32> %1,
+    iXLen %2, iXLen 2)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vaesdf.vs.nxv16i32.nxv4i32(
+  <vscale x 16 x i32>,
+  <vscale x 4 x i32>,
+  iXLen, iXLen);
+
+define <vscale x 16 x i32> @intrinsic_vaesdf_vs_nxv16i32(<vscale x 16 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vaesdf_vs_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT:    vaesdf.vs v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vaesdf.vs.nxv16i32.nxv4i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 4 x i32> %1,
+    iXLen %2, iXLen 2)
+
+  ret <vscale x 16 x i32> %a
+}
+

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vaesdm.ll b/llvm/test/CodeGen/RISCV/rvv/vaesdm.ll
new file mode 100644
index 00000000000000..bc1a7c22f373cb
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vaesdm.ll
@@ -0,0 +1,126 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvkned \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvkned \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+
+declare <vscale x 4 x i32> @llvm.riscv.vaesdm.vv.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  iXLen, iXLen);
+
+define <vscale x 4 x i32> @intrinsic_vaesdm_vv_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vaesdm_vv_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    vaesdm.vv v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vaesdm.vv.nxv4i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    iXLen %2, iXLen 2)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vaesdm.vv.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  iXLen, iXLen);
+
+define <vscale x 8 x i32> @intrinsic_vaesdm_vv_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vaesdm_vv_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    vaesdm.vv v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vaesdm.vv.nxv8i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    iXLen %2, iXLen 2)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vaesdm.vv.nxv16i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  iXLen, iXLen);
+
+define <vscale x 16 x i32> @intrinsic_vaesdm_vv_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vaesdm_vv_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT:    vaesdm.vv v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vaesdm.vv.nxv16i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    iXLen %2, iXLen 2)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vaesdm.vs.nxv4i32.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  iXLen, iXLen);
+
+define <vscale x 4 x i32> @intrinsic_vaesdm_vs_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vaesdm_vs_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    vaesdm.vs v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vaesdm.vs.nxv4i32.nxv4i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    iXLen %2, iXLen 2)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vaesdm.vs.nxv8i32.nxv4i32(
+  <vscale x 8 x i32>,
+  <vscale x 4 x i32>,
+  iXLen, iXLen);
+
+define <vscale x 8 x i32> @intrinsic_vaesdm_vs_nxv8i32(<vscale x 8 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vaesdm_vs_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    vaesdm.vs v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vaesdm.vs.nxv8i32.nxv4i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 4 x i32> %1,
+    iXLen %2, iXLen 2)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vaesdm.vs.nxv16i32.nxv4i32(
+  <vscale x 16 x i32>,
+  <vscale x 4 x i32>,
+  iXLen, iXLen);
+
+define <vscale x 16 x i32> @intrinsic_vaesdm_vs_nxv16i32(<vscale x 16 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vaesdm_vs_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT:    vaesdm.vs v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vaesdm.vs.nxv16i32.nxv4i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 4 x i32> %1,
+    iXLen %2, iXLen 2)
+
+  ret <vscale x 16 x i32> %a
+}
+

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vaesef.ll b/llvm/test/CodeGen/RISCV/rvv/vaesef.ll
new file mode 100644
index 00000000000000..44ab69f0a9a838
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vaesef.ll
@@ -0,0 +1,126 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvkned \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvkned \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+
+declare <vscale x 4 x i32> @llvm.riscv.vaesef.vv.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  iXLen, iXLen);
+
+define <vscale x 4 x i32> @intrinsic_vaesef_vv_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vaesef_vv_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    vaesef.vv v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vaesef.vv.nxv4i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    iXLen %2, iXLen 2)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vaesef.vv.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  iXLen, iXLen);
+
+define <vscale x 8 x i32> @intrinsic_vaesef_vv_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vaesef_vv_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    vaesef.vv v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vaesef.vv.nxv8i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    iXLen %2, iXLen 2)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vaesef.vv.nxv16i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  iXLen, iXLen);
+
+define <vscale x 16 x i32> @intrinsic_vaesef_vv_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vaesef_vv_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT:    vaesef.vv v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vaesef.vv.nxv16i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    iXLen %2, iXLen 2)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vaesef.vs.nxv4i32.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  iXLen, iXLen);
+
+define <vscale x 4 x i32> @intrinsic_vaesef_vs_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vaesef_vs_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    vaesef.vs v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vaesef.vs.nxv4i32.nxv4i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    iXLen %2, iXLen 2)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vaesef.vs.nxv8i32.nxv4i32(
+  <vscale x 8 x i32>,
+  <vscale x 4 x i32>,
+  iXLen, iXLen);
+
+define <vscale x 8 x i32> @intrinsic_vaesef_vs_nxv8i32(<vscale x 8 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vaesef_vs_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    vaesef.vs v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vaesef.vs.nxv8i32.nxv4i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 4 x i32> %1,
+    iXLen %2, iXLen 2)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vaesef.vs.nxv16i32.nxv4i32(
+  <vscale x 16 x i32>,
+  <vscale x 4 x i32>,
+  iXLen, iXLen);
+
+define <vscale x 16 x i32> @intrinsic_vaesef_vs_nxv16i32(<vscale x 16 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vaesef_vs_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT:    vaesef.vs v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vaesef.vs.nxv16i32.nxv4i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 4 x i32> %1,
+    iXLen %2, iXLen 2)
+
+  ret <vscale x 16 x i32> %a
+}
+

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vaesem.ll b/llvm/test/CodeGen/RISCV/rvv/vaesem.ll
new file mode 100644
index 00000000000000..0f35ce017c28c9
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vaesem.ll
@@ -0,0 +1,126 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvkned \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvkned \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+
+declare <vscale x 4 x i32> @llvm.riscv.vaesem.vv.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  iXLen, iXLen);
+
+define <vscale x 4 x i32> @intrinsic_vaesem_vv_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vaesem_vv_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    vaesem.vv v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vaesem.vv.nxv4i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    iXLen %2, iXLen 2)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vaesem.vv.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  iXLen, iXLen);
+
+define <vscale x 8 x i32> @intrinsic_vaesem_vv_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vaesem_vv_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    vaesem.vv v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vaesem.vv.nxv8i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    iXLen %2, iXLen 2)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vaesem.vv.nxv16i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  iXLen, iXLen);
+
+define <vscale x 16 x i32> @intrinsic_vaesem_vv_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vaesem_vv_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT:    vaesem.vv v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vaesem.vv.nxv16i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    iXLen %2, iXLen 2)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vaesem.vs.nxv4i32.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  iXLen, iXLen);
+
+define <vscale x 4 x i32> @intrinsic_vaesem_vs_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vaesem_vs_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    vaesem.vs v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vaesem.vs.nxv4i32.nxv4i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    iXLen %2, iXLen 2)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vaesem.vs.nxv8i32.nxv4i32(
+  <vscale x 8 x i32>,
+  <vscale x 4 x i32>,
+  iXLen, iXLen);
+
+define <vscale x 8 x i32> @intrinsic_vaesem_vs_nxv8i32(<vscale x 8 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vaesem_vs_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    vaesem.vs v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vaesem.vs.nxv8i32.nxv4i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 4 x i32> %1,
+    iXLen %2, iXLen 2)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vaesem.vs.nxv16i32.nxv4i32(
+  <vscale x 16 x i32>,
+  <vscale x 4 x i32>,
+  iXLen, iXLen);
+
+define <vscale x 16 x i32> @intrinsic_vaesem_vs_nxv16i32(<vscale x 16 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vaesem_vs_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT:    vaesem.vs v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vaesem.vs.nxv16i32.nxv4i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 4 x i32> %1,
+    iXLen %2, iXLen 2)
+
+  ret <vscale x 16 x i32> %a
+}
+

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vaeskf1.ll b/llvm/test/CodeGen/RISCV/rvv/vaeskf1.ll
new file mode 100644
index 00000000000000..681ff0d3bf6be3
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vaeskf1.ll
@@ -0,0 +1,71 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvkned \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvkned \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+
+declare <vscale x 4 x i32> @llvm.riscv.vaeskf1.nxv4i32.i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  iXLen,
+  iXLen)
+
+define <vscale x 4 x i32> @intrinsic_vaeskf1_vi_nxv4i32_i32(<vscale x 4 x i32> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vaeskf1_vi_nxv4i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    vaeskf1.vi v8, v8, 2
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vaeskf1.nxv4i32.i32(
+    <vscale x 4 x i32> undef,
+    <vscale x 4 x i32> %0,
+    iXLen 2,
+    iXLen %1)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vaeskf1.nxv8i32.i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  iXLen,
+  iXLen)
+
+define <vscale x 8 x i32> @intrinsic_vaeskf1_vi_nxv8i32_i32(<vscale x 8 x i32> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vaeskf1_vi_nxv8i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    vaeskf1.vi v8, v8, 2
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vaeskf1.nxv8i32.i32(
+    <vscale x 8 x i32> undef,
+    <vscale x 8 x i32> %0,
+    iXLen 2,
+    iXLen %1)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vaeskf1.nxv16i32.i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  iXLen,
+  iXLen)
+
+define <vscale x 16 x i32> @intrinsic_vaeskf1_vi_nxv16i32_i32(<vscale x 16 x i32> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vaeskf1_vi_nxv16i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT:    vaeskf1.vi v8, v8, 2
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vaeskf1.nxv16i32.i32(
+    <vscale x 16 x i32> undef,
+    <vscale x 16 x i32> %0,
+    iXLen 2,
+    iXLen %1)
+
+  ret <vscale x 16 x i32> %a
+}

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vaeskf2.ll b/llvm/test/CodeGen/RISCV/rvv/vaeskf2.ll
new file mode 100644
index 00000000000000..ba37f69c380cb9
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vaeskf2.ll
@@ -0,0 +1,77 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvkned \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvkned \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+
+declare <vscale x 4 x i32> @llvm.riscv.vaeskf2.nxv4i32.i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  iXLen,
+  iXLen,
+  iXLen)
+
+define <vscale x 4 x i32> @intrinsic_vaeskf2_vi_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vaeskf2_vi_nxv4i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    vaeskf2.vi v8, v10, 2
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vaeskf2.nxv4i32.i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    iXLen 2,
+    iXLen %2,
+    iXLen 2)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vaeskf2.nxv8i32.i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  iXLen,
+  iXLen,
+  iXLen)
+
+define <vscale x 8 x i32> @intrinsic_vaeskf2_vi_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vaeskf2_vi_nxv8i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    vaeskf2.vi v8, v12, 2
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vaeskf2.nxv8i32.i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    iXLen 2,
+    iXLen %2,
+    iXLen 2)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vaeskf2.nxv16i32.i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  iXLen,
+  iXLen,
+  iXLen)
+
+define <vscale x 16 x i32> @intrinsic_vaeskf2_vi_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vaeskf2_vi_nxv16i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT:    vaeskf2.vi v8, v16, 2
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vaeskf2.nxv16i32.i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    iXLen 2,
+    iXLen %2,
+    iXLen 2)
+
+  ret <vscale x 16 x i32> %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vaesz.ll b/llvm/test/CodeGen/RISCV/rvv/vaesz.ll
new file mode 100644
index 00000000000000..5c08cf3013365a
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vaesz.ll
@@ -0,0 +1,66 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvkned \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvkned \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+
+declare <vscale x 4 x i32> @llvm.riscv.vaesz.vs.nxv4i32.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  iXLen, iXLen);
+
+define <vscale x 4 x i32> @intrinsic_vaesz_vs_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vaesz_vs_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    vaesz.vs v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vaesz.vs.nxv4i32.nxv4i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    iXLen %2, iXLen 2)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vaesz.vs.nxv8i32.nxv4i32(
+  <vscale x 8 x i32>,
+  <vscale x 4 x i32>,
+  iXLen, iXLen);
+
+define <vscale x 8 x i32> @intrinsic_vaesz_vs_nxv8i32(<vscale x 8 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vaesz_vs_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    vaesz.vs v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vaesz.vs.nxv8i32.nxv4i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 4 x i32> %1,
+    iXLen %2, iXLen 2)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vaesz.vs.nxv16i32.nxv4i32(
+  <vscale x 16 x i32>,
+  <vscale x 4 x i32>,
+  iXLen, iXLen);
+
+define <vscale x 16 x i32> @intrinsic_vaesz_vs_nxv16i32(<vscale x 16 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vaesz_vs_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT:    vaesz.vs v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vaesz.vs.nxv16i32.nxv4i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 4 x i32> %1,
+    iXLen %2, iXLen 2)
+
+  ret <vscale x 16 x i32> %a
+}
+

diff --git a/llvm/test/CodeGen/RISCV/rvv/vandn.ll b/llvm/test/CodeGen/RISCV/rvv/vandn.ll
new file mode 100644
index 00000000000000..40f65f393b1c9d
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vandn.ll
@@ -0,0 +1,2173 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvbb \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvbb \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
+
+declare <vscale x 1 x i8> @llvm.riscv.vandn.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  iXLen)
+
+define <vscale x 1 x i8> @intrinsic_vandn_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vandn_vv_nxv1i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT:    vandn.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vandn.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    iXLen %2)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vandn.mask.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 1 x i8> @intrinsic_vandn_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv1i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT:    vandn.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vandn.mask.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    <vscale x 1 x i8> %2,
+    <vscale x 1 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vandn.nxv2i8.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  iXLen)
+
+define <vscale x 2 x i8> @intrinsic_vandn_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vandn_vv_nxv2i8_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT:    vandn.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.vandn.nxv2i8.nxv2i8(
+    <vscale x 2 x i8> undef,
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    iXLen %2)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vandn.mask.nxv2i8.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 2 x i8> @intrinsic_vandn_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv2i8_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT:    vandn.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.vandn.mask.nxv2i8.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    <vscale x 2 x i8> %2,
+    <vscale x 2 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vandn.nxv4i8.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  iXLen)
+
+define <vscale x 4 x i8> @intrinsic_vandn_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vandn_vv_nxv4i8_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT:    vandn.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.vandn.nxv4i8.nxv4i8(
+    <vscale x 4 x i8> undef,
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    iXLen %2)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vandn.mask.nxv4i8.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 4 x i8> @intrinsic_vandn_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv4i8_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT:    vandn.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.vandn.mask.nxv4i8.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    <vscale x 4 x i8> %2,
+    <vscale x 4 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vandn.nxv8i8.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  iXLen)
+
+define <vscale x 8 x i8> @intrinsic_vandn_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vandn_vv_nxv8i8_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT:    vandn.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.vandn.nxv8i8.nxv8i8(
+    <vscale x 8 x i8> undef,
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    iXLen %2)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vandn.mask.nxv8i8.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 8 x i8> @intrinsic_vandn_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv8i8_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT:    vandn.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.vandn.mask.nxv8i8.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    <vscale x 8 x i8> %2,
+    <vscale x 8 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vandn.nxv16i8.nxv16i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  iXLen)
+
+define <vscale x 16 x i8> @intrinsic_vandn_vv_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vandn_vv_nxv16i8_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT:    vandn.vv v8, v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.vandn.nxv16i8.nxv16i8(
+    <vscale x 16 x i8> undef,
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    iXLen %2)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vandn.mask.nxv16i8.nxv16i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  <vscale x 16 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 16 x i8> @intrinsic_vandn_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv16i8_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT:    vandn.vv v8, v10, v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.vandn.mask.nxv16i8.nxv16i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    <vscale x 16 x i8> %2,
+    <vscale x 16 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vandn.nxv32i8.nxv32i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  iXLen)
+
+define <vscale x 32 x i8> @intrinsic_vandn_vv_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vandn_vv_nxv32i8_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT:    vandn.vv v8, v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i8> @llvm.riscv.vandn.nxv32i8.nxv32i8(
+    <vscale x 32 x i8> undef,
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8> %1,
+    iXLen %2)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vandn.mask.nxv32i8.nxv32i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  <vscale x 32 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 32 x i8> @intrinsic_vandn_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv32i8_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT:    vandn.vv v8, v12, v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i8> @llvm.riscv.vandn.mask.nxv32i8.nxv32i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8> %1,
+    <vscale x 32 x i8> %2,
+    <vscale x 32 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vandn.nxv64i8.nxv64i8(
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  iXLen)
+
+define <vscale x 64 x i8> @intrinsic_vandn_vv_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vandn_vv_nxv64i8_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT:    vandn.vv v8, v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 64 x i8> @llvm.riscv.vandn.nxv64i8.nxv64i8(
+    <vscale x 64 x i8> undef,
+    <vscale x 64 x i8> %0,
+    <vscale x 64 x i8> %1,
+    iXLen %2)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vandn.mask.nxv64i8.nxv64i8(
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  <vscale x 64 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 64 x i8> @intrinsic_vandn_mask_vv_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv64i8_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vl8r.v v24, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
+; CHECK-NEXT:    vandn.vv v8, v16, v24, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 64 x i8> @llvm.riscv.vandn.mask.nxv64i8.nxv64i8(
+    <vscale x 64 x i8> %0,
+    <vscale x 64 x i8> %1,
+    <vscale x 64 x i8> %2,
+    <vscale x 64 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vandn.nxv1i16.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  iXLen)
+
+define <vscale x 1 x i16> @intrinsic_vandn_vv_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vandn_vv_nxv1i16_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    vandn.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.vandn.nxv1i16.nxv1i16(
+    <vscale x 1 x i16> undef,
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    iXLen %2)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vandn.mask.nxv1i16.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 1 x i16> @intrinsic_vandn_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv1i16_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT:    vandn.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.vandn.mask.nxv1i16.nxv1i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    <vscale x 1 x i16> %2,
+    <vscale x 1 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vandn.nxv2i16.nxv2i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  iXLen)
+
+define <vscale x 2 x i16> @intrinsic_vandn_vv_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vandn_vv_nxv2i16_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    vandn.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i16> @llvm.riscv.vandn.nxv2i16.nxv2i16(
+    <vscale x 2 x i16> undef,
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %1,
+    iXLen %2)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vandn.mask.nxv2i16.nxv2i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  <vscale x 2 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 2 x i16> @intrinsic_vandn_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv2i16_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT:    vandn.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i16> @llvm.riscv.vandn.mask.nxv2i16.nxv2i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %1,
+    <vscale x 2 x i16> %2,
+    <vscale x 2 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vandn.nxv4i16.nxv4i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  iXLen)
+
+define <vscale x 4 x i16> @intrinsic_vandn_vv_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vandn_vv_nxv4i16_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    vandn.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i16> @llvm.riscv.vandn.nxv4i16.nxv4i16(
+    <vscale x 4 x i16> undef,
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %1,
+    iXLen %2)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vandn.mask.nxv4i16.nxv4i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  <vscale x 4 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 4 x i16> @intrinsic_vandn_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv4i16_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT:    vandn.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i16> @llvm.riscv.vandn.mask.nxv4i16.nxv4i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %1,
+    <vscale x 4 x i16> %2,
+    <vscale x 4 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vandn.nxv8i16.nxv8i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  iXLen)
+
+define <vscale x 8 x i16> @intrinsic_vandn_vv_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vandn_vv_nxv8i16_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    vandn.vv v8, v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i16> @llvm.riscv.vandn.nxv8i16.nxv8i16(
+    <vscale x 8 x i16> undef,
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %1,
+    iXLen %2)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vandn.mask.nxv8i16.nxv8i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  <vscale x 8 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 8 x i16> @intrinsic_vandn_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv8i16_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT:    vandn.vv v8, v10, v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i16> @llvm.riscv.vandn.mask.nxv8i16.nxv8i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %1,
+    <vscale x 8 x i16> %2,
+    <vscale x 8 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vandn.nxv16i16.nxv16i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  iXLen)
+
+define <vscale x 16 x i16> @intrinsic_vandn_vv_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vandn_vv_nxv16i16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    vandn.vv v8, v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i16> @llvm.riscv.vandn.nxv16i16.nxv16i16(
+    <vscale x 16 x i16> undef,
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %1,
+    iXLen %2)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vandn.mask.nxv16i16.nxv16i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  <vscale x 16 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 16 x i16> @intrinsic_vandn_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv16i16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT:    vandn.vv v8, v12, v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i16> @llvm.riscv.vandn.mask.nxv16i16.nxv16i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %1,
+    <vscale x 16 x i16> %2,
+    <vscale x 16 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vandn.nxv32i16.nxv32i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  iXLen)
+
+define <vscale x 32 x i16> @intrinsic_vandn_vv_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vandn_vv_nxv32i16_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT:    vandn.vv v8, v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i16> @llvm.riscv.vandn.nxv32i16.nxv32i16(
+    <vscale x 32 x i16> undef,
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16> %1,
+    iXLen %2)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vandn.mask.nxv32i16.nxv32i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  <vscale x 32 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 32 x i16> @intrinsic_vandn_mask_vv_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv32i16_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vl8re16.v v24, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
+; CHECK-NEXT:    vandn.vv v8, v16, v24, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i16> @llvm.riscv.vandn.mask.nxv32i16.nxv32i16(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16> %1,
+    <vscale x 32 x i16> %2,
+    <vscale x 32 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vandn.nxv1i32.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  iXLen)
+
+define <vscale x 1 x i32> @intrinsic_vandn_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vandn_vv_nxv1i32_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    vandn.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vandn.nxv1i32.nxv1i32(
+    <vscale x 1 x i32> undef,
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    iXLen %2)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vandn.mask.nxv1i32.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 1 x i32> @intrinsic_vandn_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv1i32_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT:    vandn.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vandn.mask.nxv1i32.nxv1i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    <vscale x 1 x i32> %2,
+    <vscale x 1 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vandn.nxv2i32.nxv2i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  iXLen)
+
+define <vscale x 2 x i32> @intrinsic_vandn_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vandn_vv_nxv2i32_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    vandn.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vandn.nxv2i32.nxv2i32(
+    <vscale x 2 x i32> undef,
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    iXLen %2)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vandn.mask.nxv2i32.nxv2i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  <vscale x 2 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 2 x i32> @intrinsic_vandn_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv2i32_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT:    vandn.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vandn.mask.nxv2i32.nxv2i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    <vscale x 2 x i32> %2,
+    <vscale x 2 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vandn.nxv4i32.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  iXLen)
+
+define <vscale x 4 x i32> @intrinsic_vandn_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vandn_vv_nxv4i32_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    vandn.vv v8, v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vandn.nxv4i32.nxv4i32(
+    <vscale x 4 x i32> undef,
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    iXLen %2)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vandn.mask.nxv4i32.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  <vscale x 4 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 4 x i32> @intrinsic_vandn_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv4i32_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT:    vandn.vv v8, v10, v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vandn.mask.nxv4i32.nxv4i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    <vscale x 4 x i32> %2,
+    <vscale x 4 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vandn.nxv8i32.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  iXLen)
+
+define <vscale x 8 x i32> @intrinsic_vandn_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vandn_vv_nxv8i32_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    vandn.vv v8, v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vandn.nxv8i32.nxv8i32(
+    <vscale x 8 x i32> undef,
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    iXLen %2)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vandn.mask.nxv8i32.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  <vscale x 8 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 8 x i32> @intrinsic_vandn_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv8i32_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT:    vandn.vv v8, v12, v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vandn.mask.nxv8i32.nxv8i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    <vscale x 8 x i32> %2,
+    <vscale x 8 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vandn.nxv16i32.nxv16i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  iXLen)
+
+define <vscale x 16 x i32> @intrinsic_vandn_vv_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vandn_vv_nxv16i32_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT:    vandn.vv v8, v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vandn.nxv16i32.nxv16i32(
+    <vscale x 16 x i32> undef,
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    iXLen %2)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vandn.mask.nxv16i32.nxv16i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  <vscale x 16 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 16 x i32> @intrinsic_vandn_mask_vv_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv16i32_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vl8re32.v v24, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
+; CHECK-NEXT:    vandn.vv v8, v16, v24, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vandn.mask.nxv16i32.nxv16i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    <vscale x 16 x i32> %2,
+    <vscale x 16 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vandn.nxv1i64.nxv1i64(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>,
+  iXLen)
+
+define <vscale x 1 x i64> @intrinsic_vandn_vv_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vandn_vv_nxv1i64_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    vandn.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vandn.nxv1i64.nxv1i64(
+    <vscale x 1 x i64> undef,
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64> %1,
+    iXLen %2)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vandn.mask.nxv1i64.nxv1i64(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 1 x i64> @intrinsic_vandn_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv1i64_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT:    vandn.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vandn.mask.nxv1i64.nxv1i64(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64> %1,
+    <vscale x 1 x i64> %2,
+    <vscale x 1 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vandn.nxv2i64.nxv2i64(
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>,
+  iXLen)
+
+define <vscale x 2 x i64> @intrinsic_vandn_vv_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vandn_vv_nxv2i64_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    vandn.vv v8, v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i64> @llvm.riscv.vandn.nxv2i64.nxv2i64(
+    <vscale x 2 x i64> undef,
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64> %1,
+    iXLen %2)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vandn.mask.nxv2i64.nxv2i64(
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>,
+  <vscale x 2 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 2 x i64> @intrinsic_vandn_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv2i64_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT:    vandn.vv v8, v10, v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i64> @llvm.riscv.vandn.mask.nxv2i64.nxv2i64(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64> %1,
+    <vscale x 2 x i64> %2,
+    <vscale x 2 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vandn.nxv4i64.nxv4i64(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  iXLen)
+
+define <vscale x 4 x i64> @intrinsic_vandn_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vandn_vv_nxv4i64_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT:    vandn.vv v8, v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i64> @llvm.riscv.vandn.nxv4i64.nxv4i64(
+    <vscale x 4 x i64> undef,
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64> %1,
+    iXLen %2)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vandn.mask.nxv4i64.nxv4i64(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  <vscale x 4 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 4 x i64> @intrinsic_vandn_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv4i64_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT:    vandn.vv v8, v12, v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i64> @llvm.riscv.vandn.mask.nxv4i64.nxv4i64(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64> %1,
+    <vscale x 4 x i64> %2,
+    <vscale x 4 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vandn.nxv8i64.nxv8i64(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>,
+  iXLen)
+
+define <vscale x 8 x i64> @intrinsic_vandn_vv_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vandn_vv_nxv8i64_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT:    vandn.vv v8, v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vandn.nxv8i64.nxv8i64(
+    <vscale x 8 x i64> undef,
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i64> %1,
+    iXLen %2)
+
+  ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vandn.mask.nxv8i64.nxv8i64(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>,
+  <vscale x 8 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 8 x i64> @intrinsic_vandn_mask_vv_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv8i64_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vl8re64.v v24, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT:    vandn.vv v8, v16, v24, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vandn.mask.nxv8i64.nxv8i64(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i64> %1,
+    <vscale x 8 x i64> %2,
+    <vscale x 8 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vandn.nxv1i8.i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  i8,
+  iXLen)
+
+define <vscale x 1 x i8> @intrinsic_vandn_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vandn_vx_nxv1i8_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT:    vandn.vx v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vandn.nxv1i8.i8(
+    <vscale x 1 x i8> undef,
+    <vscale x 1 x i8> %0,
+    i8 %1,
+    iXLen %2)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vandn.mask.nxv1i8.i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  i8,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 1 x i8> @intrinsic_vandn_mask_vx_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vandn_mask_vx_nxv1i8_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT:    vandn.vx v8, v9, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vandn.mask.nxv1i8.i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    i8 %2,
+    <vscale x 1 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vandn.nxv2i8.i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  i8,
+  iXLen)
+
+define <vscale x 2 x i8> @intrinsic_vandn_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vandn_vx_nxv2i8_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT:    vandn.vx v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.vandn.nxv2i8.i8(
+    <vscale x 2 x i8> undef,
+    <vscale x 2 x i8> %0,
+    i8 %1,
+    iXLen %2)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vandn.mask.nxv2i8.i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  i8,
+  <vscale x 2 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 2 x i8> @intrinsic_vandn_mask_vx_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vandn_mask_vx_nxv2i8_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT:    vandn.vx v8, v9, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.vandn.mask.nxv2i8.i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    i8 %2,
+    <vscale x 2 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vandn.nxv4i8.i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  i8,
+  iXLen)
+
+define <vscale x 4 x i8> @intrinsic_vandn_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vandn_vx_nxv4i8_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT:    vandn.vx v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.vandn.nxv4i8.i8(
+    <vscale x 4 x i8> undef,
+    <vscale x 4 x i8> %0,
+    i8 %1,
+    iXLen %2)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vandn.mask.nxv4i8.i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  i8,
+  <vscale x 4 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 4 x i8> @intrinsic_vandn_mask_vx_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vandn_mask_vx_nxv4i8_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT:    vandn.vx v8, v9, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.vandn.mask.nxv4i8.i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    i8 %2,
+    <vscale x 4 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vandn.nxv8i8.i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  i8,
+  iXLen)
+
+define <vscale x 8 x i8> @intrinsic_vandn_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vandn_vx_nxv8i8_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT:    vandn.vx v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.vandn.nxv8i8.i8(
+    <vscale x 8 x i8> undef,
+    <vscale x 8 x i8> %0,
+    i8 %1,
+    iXLen %2)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vandn.mask.nxv8i8.i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  i8,
+  <vscale x 8 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 8 x i8> @intrinsic_vandn_mask_vx_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vandn_mask_vx_nxv8i8_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT:    vandn.vx v8, v9, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.vandn.mask.nxv8i8.i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    i8 %2,
+    <vscale x 8 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vandn.nxv16i8.i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  i8,
+  iXLen)
+
+define <vscale x 16 x i8> @intrinsic_vandn_vx_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vandn_vx_nxv16i8_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT:    vandn.vx v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.vandn.nxv16i8.i8(
+    <vscale x 16 x i8> undef,
+    <vscale x 16 x i8> %0,
+    i8 %1,
+    iXLen %2)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vandn.mask.nxv16i8.i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  i8,
+  <vscale x 16 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 16 x i8> @intrinsic_vandn_mask_vx_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vandn_mask_vx_nxv16i8_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
+; CHECK-NEXT:    vandn.vx v8, v10, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.vandn.mask.nxv16i8.i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    i8 %2,
+    <vscale x 16 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vandn.nxv32i8.i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  i8,
+  iXLen)
+
+define <vscale x 32 x i8> @intrinsic_vandn_vx_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vandn_vx_nxv32i8_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
+; CHECK-NEXT:    vandn.vx v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i8> @llvm.riscv.vandn.nxv32i8.i8(
+    <vscale x 32 x i8> undef,
+    <vscale x 32 x i8> %0,
+    i8 %1,
+    iXLen %2)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vandn.mask.nxv32i8.i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  i8,
+  <vscale x 32 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 32 x i8> @intrinsic_vandn_mask_vx_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vandn_mask_vx_nxv32i8_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
+; CHECK-NEXT:    vandn.vx v8, v12, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i8> @llvm.riscv.vandn.mask.nxv32i8.i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8> %1,
+    i8 %2,
+    <vscale x 32 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vandn.nxv64i8.i8(
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  i8,
+  iXLen)
+
+define <vscale x 64 x i8> @intrinsic_vandn_vx_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vandn_vx_nxv64i8_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT:    vandn.vx v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 64 x i8> @llvm.riscv.vandn.nxv64i8.i8(
+    <vscale x 64 x i8> undef,
+    <vscale x 64 x i8> %0,
+    i8 %1,
+    iXLen %2)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vandn.mask.nxv64i8.i8(
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  i8,
+  <vscale x 64 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 64 x i8> @intrinsic_vandn_mask_vx_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vandn_mask_vx_nxv64i8_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
+; CHECK-NEXT:    vandn.vx v8, v16, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 64 x i8> @llvm.riscv.vandn.mask.nxv64i8.i8(
+    <vscale x 64 x i8> %0,
+    <vscale x 64 x i8> %1,
+    i8 %2,
+    <vscale x 64 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vandn.nxv1i16.i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  i16,
+  iXLen)
+
+define <vscale x 1 x i16> @intrinsic_vandn_vx_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vandn_vx_nxv1i16_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT:    vandn.vx v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.vandn.nxv1i16.i16(
+    <vscale x 1 x i16> undef,
+    <vscale x 1 x i16> %0,
+    i16 %1,
+    iXLen %2)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vandn.mask.nxv1i16.i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  i16,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 1 x i16> @intrinsic_vandn_mask_vx_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vandn_mask_vx_nxv1i16_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT:    vandn.vx v8, v9, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.vandn.mask.nxv1i16.i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    i16 %2,
+    <vscale x 1 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vandn.nxv2i16.i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  i16,
+  iXLen)
+
+define <vscale x 2 x i16> @intrinsic_vandn_vx_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vandn_vx_nxv2i16_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT:    vandn.vx v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i16> @llvm.riscv.vandn.nxv2i16.i16(
+    <vscale x 2 x i16> undef,
+    <vscale x 2 x i16> %0,
+    i16 %1,
+    iXLen %2)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vandn.mask.nxv2i16.i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  i16,
+  <vscale x 2 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 2 x i16> @intrinsic_vandn_mask_vx_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vandn_mask_vx_nxv2i16_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
+; CHECK-NEXT:    vandn.vx v8, v9, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i16> @llvm.riscv.vandn.mask.nxv2i16.i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %1,
+    i16 %2,
+    <vscale x 2 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vandn.nxv4i16.i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  i16,
+  iXLen)
+
+define <vscale x 4 x i16> @intrinsic_vandn_vx_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vandn_vx_nxv4i16_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT:    vandn.vx v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i16> @llvm.riscv.vandn.nxv4i16.i16(
+    <vscale x 4 x i16> undef,
+    <vscale x 4 x i16> %0,
+    i16 %1,
+    iXLen %2)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vandn.mask.nxv4i16.i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  i16,
+  <vscale x 4 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 4 x i16> @intrinsic_vandn_mask_vx_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vandn_mask_vx_nxv4i16_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT:    vandn.vx v8, v9, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i16> @llvm.riscv.vandn.mask.nxv4i16.i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %1,
+    i16 %2,
+    <vscale x 4 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vandn.nxv8i16.i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  i16,
+  iXLen)
+
+define <vscale x 8 x i16> @intrinsic_vandn_vx_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vandn_vx_nxv8i16_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT:    vandn.vx v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i16> @llvm.riscv.vandn.nxv8i16.i16(
+    <vscale x 8 x i16> undef,
+    <vscale x 8 x i16> %0,
+    i16 %1,
+    iXLen %2)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vandn.mask.nxv8i16.i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  i16,
+  <vscale x 8 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 8 x i16> @intrinsic_vandn_mask_vx_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vandn_mask_vx_nxv8i16_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
+; CHECK-NEXT:    vandn.vx v8, v10, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i16> @llvm.riscv.vandn.mask.nxv8i16.i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %1,
+    i16 %2,
+    <vscale x 8 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vandn.nxv16i16.i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  i16,
+  iXLen)
+
+define <vscale x 16 x i16> @intrinsic_vandn_vx_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vandn_vx_nxv16i16_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT:    vandn.vx v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i16> @llvm.riscv.vandn.nxv16i16.i16(
+    <vscale x 16 x i16> undef,
+    <vscale x 16 x i16> %0,
+    i16 %1,
+    iXLen %2)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vandn.mask.nxv16i16.i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  i16,
+  <vscale x 16 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 16 x i16> @intrinsic_vandn_mask_vx_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vandn_mask_vx_nxv16i16_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
+; CHECK-NEXT:    vandn.vx v8, v12, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i16> @llvm.riscv.vandn.mask.nxv16i16.i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %1,
+    i16 %2,
+    <vscale x 16 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vandn.nxv32i16.i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  i16,
+  iXLen)
+
+define <vscale x 32 x i16> @intrinsic_vandn_vx_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vandn_vx_nxv32i16_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT:    vandn.vx v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i16> @llvm.riscv.vandn.nxv32i16.i16(
+    <vscale x 32 x i16> undef,
+    <vscale x 32 x i16> %0,
+    i16 %1,
+    iXLen %2)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vandn.mask.nxv32i16.i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  i16,
+  <vscale x 32 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 32 x i16> @intrinsic_vandn_mask_vx_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vandn_mask_vx_nxv32i16_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
+; CHECK-NEXT:    vandn.vx v8, v16, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i16> @llvm.riscv.vandn.mask.nxv32i16.i16(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16> %1,
+    i16 %2,
+    <vscale x 32 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vandn.nxv1i32.i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  i32,
+  iXLen)
+
+define <vscale x 1 x i32> @intrinsic_vandn_vx_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vandn_vx_nxv1i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT:    vandn.vx v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vandn.nxv1i32.i32(
+    <vscale x 1 x i32> undef,
+    <vscale x 1 x i32> %0,
+    i32 %1,
+    iXLen %2)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vandn.mask.nxv1i32.i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  i32,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 1 x i32> @intrinsic_vandn_mask_vx_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vandn_mask_vx_nxv1i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
+; CHECK-NEXT:    vandn.vx v8, v9, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vandn.mask.nxv1i32.i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    i32 %2,
+    <vscale x 1 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vandn.nxv2i32.i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  i32,
+  iXLen)
+
+define <vscale x 2 x i32> @intrinsic_vandn_vx_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vandn_vx_nxv2i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT:    vandn.vx v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vandn.nxv2i32.i32(
+    <vscale x 2 x i32> undef,
+    <vscale x 2 x i32> %0,
+    i32 %1,
+    iXLen %2)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vandn.mask.nxv2i32.i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  i32,
+  <vscale x 2 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 2 x i32> @intrinsic_vandn_mask_vx_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vandn_mask_vx_nxv2i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT:    vandn.vx v8, v9, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vandn.mask.nxv2i32.i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    i32 %2,
+    <vscale x 2 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vandn.nxv4i32.i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  i32,
+  iXLen)
+
+define <vscale x 4 x i32> @intrinsic_vandn_vx_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vandn_vx_nxv4i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT:    vandn.vx v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vandn.nxv4i32.i32(
+    <vscale x 4 x i32> undef,
+    <vscale x 4 x i32> %0,
+    i32 %1,
+    iXLen %2)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vandn.mask.nxv4i32.i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  i32,
+  <vscale x 4 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 4 x i32> @intrinsic_vandn_mask_vx_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vandn_mask_vx_nxv4i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
+; CHECK-NEXT:    vandn.vx v8, v10, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vandn.mask.nxv4i32.i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    i32 %2,
+    <vscale x 4 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vandn.nxv8i32.i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  i32,
+  iXLen)
+
+define <vscale x 8 x i32> @intrinsic_vandn_vx_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vandn_vx_nxv8i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT:    vandn.vx v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vandn.nxv8i32.i32(
+    <vscale x 8 x i32> undef,
+    <vscale x 8 x i32> %0,
+    i32 %1,
+    iXLen %2)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vandn.mask.nxv8i32.i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  i32,
+  <vscale x 8 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 8 x i32> @intrinsic_vandn_mask_vx_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vandn_mask_vx_nxv8i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
+; CHECK-NEXT:    vandn.vx v8, v12, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vandn.mask.nxv8i32.i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    i32 %2,
+    <vscale x 8 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vandn.nxv16i32.i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  i32,
+  iXLen)
+
+define <vscale x 16 x i32> @intrinsic_vandn_vx_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vandn_vx_nxv16i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT:    vandn.vx v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vandn.nxv16i32.i32(
+    <vscale x 16 x i32> undef,
+    <vscale x 16 x i32> %0,
+    i32 %1,
+    iXLen %2)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vandn.mask.nxv16i32.i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  i32,
+  <vscale x 16 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 16 x i32> @intrinsic_vandn_mask_vx_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vandn_mask_vx_nxv16i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
+; CHECK-NEXT:    vandn.vx v8, v16, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vandn.mask.nxv16i32.i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    i32 %2,
+    <vscale x 16 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vandn.nxv1i64.i64(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>,
+  i64,
+  iXLen)
+
+define <vscale x 1 x i64> @intrinsic_vandn_vx_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vandn_vx_nxv1i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT:    vlse64.v v9, (a0), zero
+; RV32-NEXT:    vandn.vv v8, v8, v9
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vandn_vx_nxv1i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT:    vandn.vx v8, v8, a0
+; RV64-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vandn.nxv1i64.i64(
+    <vscale x 1 x i64> undef,
+    <vscale x 1 x i64> %0,
+    i64 %1,
+    iXLen %2)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vandn.mask.nxv1i64.i64(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>,
+  i64,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 1 x i64> @intrinsic_vandn_mask_vx_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vandn_mask_vx_nxv1i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT:    vlse64.v v10, (a0), zero
+; RV32-NEXT:    vandn.vv v8, v9, v10, v0.t
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vandn_mask_vx_nxv1i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT:    vandn.vx v8, v9, a0, v0.t
+; RV64-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vandn.mask.nxv1i64.i64(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64> %1,
+    i64 %2,
+    <vscale x 1 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vandn.nxv2i64.i64(
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>,
+  i64,
+  iXLen)
+
+define <vscale x 2 x i64> @intrinsic_vandn_vx_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vandn_vx_nxv2i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT:    vlse64.v v10, (a0), zero
+; RV32-NEXT:    vandn.vv v8, v8, v10
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vandn_vx_nxv2i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
+; RV64-NEXT:    vandn.vx v8, v8, a0
+; RV64-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i64> @llvm.riscv.vandn.nxv2i64.i64(
+    <vscale x 2 x i64> undef,
+    <vscale x 2 x i64> %0,
+    i64 %1,
+    iXLen %2)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vandn.mask.nxv2i64.i64(
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>,
+  i64,
+  <vscale x 2 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 2 x i64> @intrinsic_vandn_mask_vx_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vandn_mask_vx_nxv2i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT:    vlse64.v v12, (a0), zero
+; RV32-NEXT:    vandn.vv v8, v10, v12, v0.t
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vandn_mask_vx_nxv2i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT:    vandn.vx v8, v10, a0, v0.t
+; RV64-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i64> @llvm.riscv.vandn.mask.nxv2i64.i64(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64> %1,
+    i64 %2,
+    <vscale x 2 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vandn.nxv4i64.i64(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  i64,
+  iXLen)
+
+define <vscale x 4 x i64> @intrinsic_vandn_vx_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vandn_vx_nxv4i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT:    vlse64.v v12, (a0), zero
+; RV32-NEXT:    vandn.vv v8, v8, v12
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vandn_vx_nxv4i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
+; RV64-NEXT:    vandn.vx v8, v8, a0
+; RV64-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i64> @llvm.riscv.vandn.nxv4i64.i64(
+    <vscale x 4 x i64> undef,
+    <vscale x 4 x i64> %0,
+    i64 %1,
+    iXLen %2)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vandn.mask.nxv4i64.i64(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  i64,
+  <vscale x 4 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 4 x i64> @intrinsic_vandn_mask_vx_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vandn_mask_vx_nxv4i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT:    vlse64.v v16, (a0), zero
+; RV32-NEXT:    vandn.vv v8, v12, v16, v0.t
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vandn_mask_vx_nxv4i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT:    vandn.vx v8, v12, a0, v0.t
+; RV64-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i64> @llvm.riscv.vandn.mask.nxv4i64.i64(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64> %1,
+    i64 %2,
+    <vscale x 4 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vandn.nxv8i64.i64(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>,
+  i64,
+  iXLen)
+
+define <vscale x 8 x i64> @intrinsic_vandn_vx_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vandn_vx_nxv8i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT:    vlse64.v v16, (a0), zero
+; RV32-NEXT:    vandn.vv v8, v8, v16
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vandn_vx_nxv8i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; RV64-NEXT:    vandn.vx v8, v8, a0
+; RV64-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vandn.nxv8i64.i64(
+    <vscale x 8 x i64> undef,
+    <vscale x 8 x i64> %0,
+    i64 %1,
+    iXLen %2)
+
+  ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vandn.mask.nxv8i64.i64(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>,
+  i64,
+  <vscale x 8 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 8 x i64> @intrinsic_vandn_mask_vx_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vandn_mask_vx_nxv8i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT:    vlse64.v v24, (a0), zero
+; RV32-NEXT:    vandn.vv v8, v16, v24, v0.t
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vandn_mask_vx_nxv8i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT:    vandn.vx v8, v16, a0, v0.t
+; RV64-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vandn.mask.nxv8i64.i64(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i64> %1,
+    i64 %2,
+    <vscale x 8 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 8 x i64> %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vbrev.ll b/llvm/test/CodeGen/RISCV/rvv/vbrev.ll
new file mode 100644
index 00000000000000..f95e43fb5fcb8c
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vbrev.ll
@@ -0,0 +1,951 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvbb \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvbb \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+
+declare <vscale x 1 x i8> @llvm.riscv.vbrev.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vbrev_vs_nxv1i8(<vscale x 1 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vbrev_vs_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT:    vbrev.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vbrev.nxv1i8(
+    <vscale x 1 x i8> undef,
+    <vscale x 1 x i8> %0,
+    iXLen %1)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vbrev.mask.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vbrev_mask_vs_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT:    vbrev.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vbrev.mask.nxv1i8(
+    <vscale x 1 x i8> %1,
+    <vscale x 1 x i8> %2,
+    <vscale x 1 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vbrev.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  iXLen);
+
+define <vscale x 2 x i8> @intrinsic_vbrev_vs_nxv2i8(<vscale x 2 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vbrev_vs_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT:    vbrev.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.vbrev.nxv2i8(
+    <vscale x 2 x i8> undef,
+    <vscale x 2 x i8> %0,
+    iXLen %1)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vbrev.mask.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 2 x i8> @intrinsic_vbrev_mask_vs_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT:    vbrev.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.vbrev.mask.nxv2i8(
+    <vscale x 2 x i8> %1,
+    <vscale x 2 x i8> %2,
+    <vscale x 2 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vbrev.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  iXLen);
+
+define <vscale x 4 x i8> @intrinsic_vbrev_vs_nxv4i8(<vscale x 4 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vbrev_vs_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT:    vbrev.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.vbrev.nxv4i8(
+    <vscale x 4 x i8> undef,
+    <vscale x 4 x i8> %0,
+    iXLen %1)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vbrev.mask.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 4 x i8> @intrinsic_vbrev_mask_vs_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT:    vbrev.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.vbrev.mask.nxv4i8(
+    <vscale x 4 x i8> %1,
+    <vscale x 4 x i8> %2,
+    <vscale x 4 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vbrev.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  iXLen);
+
+define <vscale x 8 x i8> @intrinsic_vbrev_vs_nxv8i8(<vscale x 8 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vbrev_vs_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT:    vbrev.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.vbrev.nxv8i8(
+    <vscale x 8 x i8> undef,
+    <vscale x 8 x i8> %0,
+    iXLen %1)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vbrev.mask.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 8 x i8> @intrinsic_vbrev_mask_vs_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT:    vbrev.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.vbrev.mask.nxv8i8(
+    <vscale x 8 x i8> %1,
+    <vscale x 8 x i8> %2,
+    <vscale x 8 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vbrev.nxv16i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  iXLen);
+
+define <vscale x 16 x i8> @intrinsic_vbrev_vs_nxv16i8(<vscale x 16 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vbrev_vs_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT:    vbrev.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.vbrev.nxv16i8(
+    <vscale x 16 x i8> undef,
+    <vscale x 16 x i8> %0,
+    iXLen %1)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vbrev.mask.nxv16i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  <vscale x 16 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 16 x i8> @intrinsic_vbrev_mask_vs_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT:    vbrev.v v8, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.vbrev.mask.nxv16i8(
+    <vscale x 16 x i8> %1,
+    <vscale x 16 x i8> %2,
+    <vscale x 16 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vbrev.nxv32i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  iXLen);
+
+define <vscale x 32 x i8> @intrinsic_vbrev_vs_nxv32i8(<vscale x 32 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vbrev_vs_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT:    vbrev.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i8> @llvm.riscv.vbrev.nxv32i8(
+    <vscale x 32 x i8> undef,
+    <vscale x 32 x i8> %0,
+    iXLen %1)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vbrev.mask.nxv32i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  <vscale x 32 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 32 x i8> @intrinsic_vbrev_mask_vs_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT:    vbrev.v v8, v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i8> @llvm.riscv.vbrev.mask.nxv32i8(
+    <vscale x 32 x i8> %1,
+    <vscale x 32 x i8> %2,
+    <vscale x 32 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vbrev.nxv64i8(
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  iXLen);
+
+define <vscale x 64 x i8> @intrinsic_vbrev_vs_nxv64i8(<vscale x 64 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vbrev_vs_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT:    vbrev.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 64 x i8> @llvm.riscv.vbrev.nxv64i8(
+    <vscale x 64 x i8> undef,
+    <vscale x 64 x i8> %0,
+    iXLen %1)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vbrev.mask.nxv64i8(
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  <vscale x 64 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 64 x i8> @intrinsic_vbrev_mask_vs_nxv64i8(<vscale x 64 x i1> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
+; CHECK-NEXT:    vbrev.v v8, v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 64 x i8> @llvm.riscv.vbrev.mask.nxv64i8(
+    <vscale x 64 x i8> %1,
+    <vscale x 64 x i8> %2,
+    <vscale x 64 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vbrev.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  iXLen);
+
+define <vscale x 1 x i16> @intrinsic_vbrev_vs_nxv1i16(<vscale x 1 x i16> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vbrev_vs_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    vbrev.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.vbrev.nxv1i16(
+    <vscale x 1 x i16> undef,
+    <vscale x 1 x i16> %0,
+    iXLen %1)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vbrev.mask.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 1 x i16> @intrinsic_vbrev_mask_vs_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT:    vbrev.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.vbrev.mask.nxv1i16(
+    <vscale x 1 x i16> %1,
+    <vscale x 1 x i16> %2,
+    <vscale x 1 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vbrev.nxv2i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  iXLen);
+
+define <vscale x 2 x i16> @intrinsic_vbrev_vs_nxv2i16(<vscale x 2 x i16> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vbrev_vs_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    vbrev.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i16> @llvm.riscv.vbrev.nxv2i16(
+    <vscale x 2 x i16> undef,
+    <vscale x 2 x i16> %0,
+    iXLen %1)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vbrev.mask.nxv2i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  <vscale x 2 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 2 x i16> @intrinsic_vbrev_mask_vs_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT:    vbrev.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i16> @llvm.riscv.vbrev.mask.nxv2i16(
+    <vscale x 2 x i16> %1,
+    <vscale x 2 x i16> %2,
+    <vscale x 2 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vbrev.nxv4i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  iXLen);
+
+define <vscale x 4 x i16> @intrinsic_vbrev_vs_nxv4i16(<vscale x 4 x i16> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vbrev_vs_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    vbrev.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i16> @llvm.riscv.vbrev.nxv4i16(
+    <vscale x 4 x i16> undef,
+    <vscale x 4 x i16> %0,
+    iXLen %1)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vbrev.mask.nxv4i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  <vscale x 4 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 4 x i16> @intrinsic_vbrev_mask_vs_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT:    vbrev.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i16> @llvm.riscv.vbrev.mask.nxv4i16(
+    <vscale x 4 x i16> %1,
+    <vscale x 4 x i16> %2,
+    <vscale x 4 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vbrev.nxv8i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  iXLen);
+
+define <vscale x 8 x i16> @intrinsic_vbrev_vs_nxv8i16(<vscale x 8 x i16> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vbrev_vs_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    vbrev.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i16> @llvm.riscv.vbrev.nxv8i16(
+    <vscale x 8 x i16> undef,
+    <vscale x 8 x i16> %0,
+    iXLen %1)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vbrev.mask.nxv8i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  <vscale x 8 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 8 x i16> @intrinsic_vbrev_mask_vs_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT:    vbrev.v v8, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i16> @llvm.riscv.vbrev.mask.nxv8i16(
+    <vscale x 8 x i16> %1,
+    <vscale x 8 x i16> %2,
+    <vscale x 8 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vbrev.nxv16i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  iXLen);
+
+define <vscale x 16 x i16> @intrinsic_vbrev_vs_nxv16i16(<vscale x 16 x i16> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vbrev_vs_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    vbrev.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i16> @llvm.riscv.vbrev.nxv16i16(
+    <vscale x 16 x i16> undef,
+    <vscale x 16 x i16> %0,
+    iXLen %1)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vbrev.mask.nxv16i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  <vscale x 16 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 16 x i16> @intrinsic_vbrev_mask_vs_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT:    vbrev.v v8, v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i16> @llvm.riscv.vbrev.mask.nxv16i16(
+    <vscale x 16 x i16> %1,
+    <vscale x 16 x i16> %2,
+    <vscale x 16 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vbrev.nxv32i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  iXLen);
+
+define <vscale x 32 x i16> @intrinsic_vbrev_vs_nxv32i16(<vscale x 32 x i16> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vbrev_vs_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT:    vbrev.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i16> @llvm.riscv.vbrev.nxv32i16(
+    <vscale x 32 x i16> undef,
+    <vscale x 32 x i16> %0,
+    iXLen %1)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vbrev.mask.nxv32i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  <vscale x 32 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 32 x i16> @intrinsic_vbrev_mask_vs_nxv32i16(<vscale x 32 x i1> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT:    vbrev.v v8, v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i16> @llvm.riscv.vbrev.mask.nxv32i16(
+    <vscale x 32 x i16> %1,
+    <vscale x 32 x i16> %2,
+    <vscale x 32 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vbrev.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  iXLen);
+
+define <vscale x 1 x i32> @intrinsic_vbrev_vs_nxv1i32(<vscale x 1 x i32> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vbrev_vs_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    vbrev.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vbrev.nxv1i32(
+    <vscale x 1 x i32> undef,
+    <vscale x 1 x i32> %0,
+    iXLen %1)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vbrev.mask.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 1 x i32> @intrinsic_vbrev_mask_vs_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT:    vbrev.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vbrev.mask.nxv1i32(
+    <vscale x 1 x i32> %1,
+    <vscale x 1 x i32> %2,
+    <vscale x 1 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vbrev.nxv2i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  iXLen);
+
+define <vscale x 2 x i32> @intrinsic_vbrev_vs_nxv2i32(<vscale x 2 x i32> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vbrev_vs_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    vbrev.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vbrev.nxv2i32(
+    <vscale x 2 x i32> undef,
+    <vscale x 2 x i32> %0,
+    iXLen %1)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vbrev.mask.nxv2i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  <vscale x 2 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 2 x i32> @intrinsic_vbrev_mask_vs_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT:    vbrev.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vbrev.mask.nxv2i32(
+    <vscale x 2 x i32> %1,
+    <vscale x 2 x i32> %2,
+    <vscale x 2 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vbrev.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  iXLen);
+
+define <vscale x 4 x i32> @intrinsic_vbrev_vs_nxv4i32(<vscale x 4 x i32> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vbrev_vs_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    vbrev.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vbrev.nxv4i32(
+    <vscale x 4 x i32> undef,
+    <vscale x 4 x i32> %0,
+    iXLen %1)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vbrev.mask.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  <vscale x 4 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 4 x i32> @intrinsic_vbrev_mask_vs_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT:    vbrev.v v8, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vbrev.mask.nxv4i32(
+    <vscale x 4 x i32> %1,
+    <vscale x 4 x i32> %2,
+    <vscale x 4 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vbrev.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  iXLen);
+
+define <vscale x 8 x i32> @intrinsic_vbrev_vs_nxv8i32(<vscale x 8 x i32> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vbrev_vs_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    vbrev.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vbrev.nxv8i32(
+    <vscale x 8 x i32> undef,
+    <vscale x 8 x i32> %0,
+    iXLen %1)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vbrev.mask.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  <vscale x 8 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 8 x i32> @intrinsic_vbrev_mask_vs_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT:    vbrev.v v8, v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vbrev.mask.nxv8i32(
+    <vscale x 8 x i32> %1,
+    <vscale x 8 x i32> %2,
+    <vscale x 8 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vbrev.nxv16i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  iXLen);
+
+define <vscale x 16 x i32> @intrinsic_vbrev_vs_nxv16i32(<vscale x 16 x i32> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vbrev_vs_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT:    vbrev.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vbrev.nxv16i32(
+    <vscale x 16 x i32> undef,
+    <vscale x 16 x i32> %0,
+    iXLen %1)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vbrev.mask.nxv16i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  <vscale x 16 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 16 x i32> @intrinsic_vbrev_mask_vs_nxv16i32(<vscale x 16 x i1> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT:    vbrev.v v8, v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vbrev.mask.nxv16i32(
+    <vscale x 16 x i32> %1,
+    <vscale x 16 x i32> %2,
+    <vscale x 16 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vbrev.nxv1i64(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>,
+  iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vbrev_vs_nxv1i64(<vscale x 1 x i64> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vbrev_vs_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    vbrev.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vbrev.nxv1i64(
+    <vscale x 1 x i64> undef,
+    <vscale x 1 x i64> %0,
+    iXLen %1)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vbrev.mask.nxv1i64(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vbrev_mask_vs_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT:    vbrev.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vbrev.mask.nxv1i64(
+    <vscale x 1 x i64> %1,
+    <vscale x 1 x i64> %2,
+    <vscale x 1 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vbrev.nxv2i64(
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>,
+  iXLen);
+
+define <vscale x 2 x i64> @intrinsic_vbrev_vs_nxv2i64(<vscale x 2 x i64> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vbrev_vs_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    vbrev.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i64> @llvm.riscv.vbrev.nxv2i64(
+    <vscale x 2 x i64> undef,
+    <vscale x 2 x i64> %0,
+    iXLen %1)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vbrev.mask.nxv2i64(
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>,
+  <vscale x 2 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 2 x i64> @intrinsic_vbrev_mask_vs_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT:    vbrev.v v8, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i64> @llvm.riscv.vbrev.mask.nxv2i64(
+    <vscale x 2 x i64> %1,
+    <vscale x 2 x i64> %2,
+    <vscale x 2 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vbrev.nxv4i64(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vbrev_vs_nxv4i64(<vscale x 4 x i64> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vbrev_vs_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT:    vbrev.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i64> @llvm.riscv.vbrev.nxv4i64(
+    <vscale x 4 x i64> undef,
+    <vscale x 4 x i64> %0,
+    iXLen %1)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vbrev.mask.nxv4i64(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  <vscale x 4 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vbrev_mask_vs_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT:    vbrev.v v8, v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i64> @llvm.riscv.vbrev.mask.nxv4i64(
+    <vscale x 4 x i64> %1,
+    <vscale x 4 x i64> %2,
+    <vscale x 4 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vbrev.nxv8i64(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>,
+  iXLen);
+
+define <vscale x 8 x i64> @intrinsic_vbrev_vs_nxv8i64(<vscale x 8 x i64> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vbrev_vs_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT:    vbrev.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vbrev.nxv8i64(
+    <vscale x 8 x i64> undef,
+    <vscale x 8 x i64> %0,
+    iXLen %1)
+
+  ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vbrev.mask.nxv8i64(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>,
+  <vscale x 8 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 8 x i64> @intrinsic_vbrev_mask_vs_nxv8i64(<vscale x 8 x i1> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT:    vbrev.v v8, v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vbrev.mask.nxv8i64(
+    <vscale x 8 x i64> %1,
+    <vscale x 8 x i64> %2,
+    <vscale x 8 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 8 x i64> %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vbrev8.ll b/llvm/test/CodeGen/RISCV/rvv/vbrev8.ll
new file mode 100644
index 00000000000000..57862743eeb0dd
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vbrev8.ll
@@ -0,0 +1,951 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvbb \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvbb \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+
+declare <vscale x 1 x i8> @llvm.riscv.vbrev8.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vbrev8_vs_nxv1i8(<vscale x 1 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vbrev8_vs_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT:    vbrev8.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vbrev8.nxv1i8(
+    <vscale x 1 x i8> undef,
+    <vscale x 1 x i8> %0,
+    iXLen %1)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vbrev8.mask.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vbrev8_mask_vs_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT:    vbrev8.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vbrev8.mask.nxv1i8(
+    <vscale x 1 x i8> %1,
+    <vscale x 1 x i8> %2,
+    <vscale x 1 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vbrev8.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  iXLen);
+
+define <vscale x 2 x i8> @intrinsic_vbrev8_vs_nxv2i8(<vscale x 2 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vbrev8_vs_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT:    vbrev8.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.vbrev8.nxv2i8(
+    <vscale x 2 x i8> undef,
+    <vscale x 2 x i8> %0,
+    iXLen %1)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vbrev8.mask.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 2 x i8> @intrinsic_vbrev8_mask_vs_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT:    vbrev8.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.vbrev8.mask.nxv2i8(
+    <vscale x 2 x i8> %1,
+    <vscale x 2 x i8> %2,
+    <vscale x 2 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vbrev8.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  iXLen);
+
+define <vscale x 4 x i8> @intrinsic_vbrev8_vs_nxv4i8(<vscale x 4 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vbrev8_vs_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT:    vbrev8.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.vbrev8.nxv4i8(
+    <vscale x 4 x i8> undef,
+    <vscale x 4 x i8> %0,
+    iXLen %1)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vbrev8.mask.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 4 x i8> @intrinsic_vbrev8_mask_vs_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT:    vbrev8.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.vbrev8.mask.nxv4i8(
+    <vscale x 4 x i8> %1,
+    <vscale x 4 x i8> %2,
+    <vscale x 4 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vbrev8.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  iXLen);
+
+define <vscale x 8 x i8> @intrinsic_vbrev8_vs_nxv8i8(<vscale x 8 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vbrev8_vs_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT:    vbrev8.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.vbrev8.nxv8i8(
+    <vscale x 8 x i8> undef,
+    <vscale x 8 x i8> %0,
+    iXLen %1)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vbrev8.mask.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 8 x i8> @intrinsic_vbrev8_mask_vs_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT:    vbrev8.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.vbrev8.mask.nxv8i8(
+    <vscale x 8 x i8> %1,
+    <vscale x 8 x i8> %2,
+    <vscale x 8 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vbrev8.nxv16i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  iXLen);
+
+define <vscale x 16 x i8> @intrinsic_vbrev8_vs_nxv16i8(<vscale x 16 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vbrev8_vs_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT:    vbrev8.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.vbrev8.nxv16i8(
+    <vscale x 16 x i8> undef,
+    <vscale x 16 x i8> %0,
+    iXLen %1)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vbrev8.mask.nxv16i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  <vscale x 16 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 16 x i8> @intrinsic_vbrev8_mask_vs_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT:    vbrev8.v v8, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.vbrev8.mask.nxv16i8(
+    <vscale x 16 x i8> %1,
+    <vscale x 16 x i8> %2,
+    <vscale x 16 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vbrev8.nxv32i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  iXLen);
+
+define <vscale x 32 x i8> @intrinsic_vbrev8_vs_nxv32i8(<vscale x 32 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vbrev8_vs_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT:    vbrev8.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i8> @llvm.riscv.vbrev8.nxv32i8(
+    <vscale x 32 x i8> undef,
+    <vscale x 32 x i8> %0,
+    iXLen %1)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vbrev8.mask.nxv32i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  <vscale x 32 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 32 x i8> @intrinsic_vbrev8_mask_vs_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT:    vbrev8.v v8, v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i8> @llvm.riscv.vbrev8.mask.nxv32i8(
+    <vscale x 32 x i8> %1,
+    <vscale x 32 x i8> %2,
+    <vscale x 32 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vbrev8.nxv64i8(
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  iXLen);
+
+define <vscale x 64 x i8> @intrinsic_vbrev8_vs_nxv64i8(<vscale x 64 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vbrev8_vs_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT:    vbrev8.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 64 x i8> @llvm.riscv.vbrev8.nxv64i8(
+    <vscale x 64 x i8> undef,
+    <vscale x 64 x i8> %0,
+    iXLen %1)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vbrev8.mask.nxv64i8(
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  <vscale x 64 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 64 x i8> @intrinsic_vbrev8_mask_vs_nxv64i8(<vscale x 64 x i1> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
+; CHECK-NEXT:    vbrev8.v v8, v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 64 x i8> @llvm.riscv.vbrev8.mask.nxv64i8(
+    <vscale x 64 x i8> %1,
+    <vscale x 64 x i8> %2,
+    <vscale x 64 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vbrev8.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  iXLen);
+
+define <vscale x 1 x i16> @intrinsic_vbrev8_vs_nxv1i16(<vscale x 1 x i16> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vbrev8_vs_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    vbrev8.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.vbrev8.nxv1i16(
+    <vscale x 1 x i16> undef,
+    <vscale x 1 x i16> %0,
+    iXLen %1)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vbrev8.mask.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 1 x i16> @intrinsic_vbrev8_mask_vs_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT:    vbrev8.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.vbrev8.mask.nxv1i16(
+    <vscale x 1 x i16> %1,
+    <vscale x 1 x i16> %2,
+    <vscale x 1 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vbrev8.nxv2i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  iXLen);
+
+define <vscale x 2 x i16> @intrinsic_vbrev8_vs_nxv2i16(<vscale x 2 x i16> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vbrev8_vs_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    vbrev8.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i16> @llvm.riscv.vbrev8.nxv2i16(
+    <vscale x 2 x i16> undef,
+    <vscale x 2 x i16> %0,
+    iXLen %1)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vbrev8.mask.nxv2i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  <vscale x 2 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 2 x i16> @intrinsic_vbrev8_mask_vs_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT:    vbrev8.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i16> @llvm.riscv.vbrev8.mask.nxv2i16(
+    <vscale x 2 x i16> %1,
+    <vscale x 2 x i16> %2,
+    <vscale x 2 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vbrev8.nxv4i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  iXLen);
+
+define <vscale x 4 x i16> @intrinsic_vbrev8_vs_nxv4i16(<vscale x 4 x i16> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vbrev8_vs_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    vbrev8.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i16> @llvm.riscv.vbrev8.nxv4i16(
+    <vscale x 4 x i16> undef,
+    <vscale x 4 x i16> %0,
+    iXLen %1)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vbrev8.mask.nxv4i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  <vscale x 4 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 4 x i16> @intrinsic_vbrev8_mask_vs_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT:    vbrev8.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i16> @llvm.riscv.vbrev8.mask.nxv4i16(
+    <vscale x 4 x i16> %1,
+    <vscale x 4 x i16> %2,
+    <vscale x 4 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vbrev8.nxv8i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  iXLen);
+
+define <vscale x 8 x i16> @intrinsic_vbrev8_vs_nxv8i16(<vscale x 8 x i16> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vbrev8_vs_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    vbrev8.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i16> @llvm.riscv.vbrev8.nxv8i16(
+    <vscale x 8 x i16> undef,
+    <vscale x 8 x i16> %0,
+    iXLen %1)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vbrev8.mask.nxv8i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  <vscale x 8 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 8 x i16> @intrinsic_vbrev8_mask_vs_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT:    vbrev8.v v8, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i16> @llvm.riscv.vbrev8.mask.nxv8i16(
+    <vscale x 8 x i16> %1,
+    <vscale x 8 x i16> %2,
+    <vscale x 8 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vbrev8.nxv16i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  iXLen);
+
+define <vscale x 16 x i16> @intrinsic_vbrev8_vs_nxv16i16(<vscale x 16 x i16> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vbrev8_vs_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    vbrev8.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i16> @llvm.riscv.vbrev8.nxv16i16(
+    <vscale x 16 x i16> undef,
+    <vscale x 16 x i16> %0,
+    iXLen %1)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vbrev8.mask.nxv16i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  <vscale x 16 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 16 x i16> @intrinsic_vbrev8_mask_vs_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT:    vbrev8.v v8, v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i16> @llvm.riscv.vbrev8.mask.nxv16i16(
+    <vscale x 16 x i16> %1,
+    <vscale x 16 x i16> %2,
+    <vscale x 16 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vbrev8.nxv32i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  iXLen);
+
+define <vscale x 32 x i16> @intrinsic_vbrev8_vs_nxv32i16(<vscale x 32 x i16> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vbrev8_vs_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT:    vbrev8.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i16> @llvm.riscv.vbrev8.nxv32i16(
+    <vscale x 32 x i16> undef,
+    <vscale x 32 x i16> %0,
+    iXLen %1)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vbrev8.mask.nxv32i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  <vscale x 32 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 32 x i16> @intrinsic_vbrev8_mask_vs_nxv32i16(<vscale x 32 x i1> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT:    vbrev8.v v8, v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i16> @llvm.riscv.vbrev8.mask.nxv32i16(
+    <vscale x 32 x i16> %1,
+    <vscale x 32 x i16> %2,
+    <vscale x 32 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vbrev8.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  iXLen);
+
+define <vscale x 1 x i32> @intrinsic_vbrev8_vs_nxv1i32(<vscale x 1 x i32> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vbrev8_vs_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    vbrev8.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vbrev8.nxv1i32(
+    <vscale x 1 x i32> undef,
+    <vscale x 1 x i32> %0,
+    iXLen %1)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vbrev8.mask.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 1 x i32> @intrinsic_vbrev8_mask_vs_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT:    vbrev8.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vbrev8.mask.nxv1i32(
+    <vscale x 1 x i32> %1,
+    <vscale x 1 x i32> %2,
+    <vscale x 1 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vbrev8.nxv2i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  iXLen);
+
+define <vscale x 2 x i32> @intrinsic_vbrev8_vs_nxv2i32(<vscale x 2 x i32> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vbrev8_vs_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    vbrev8.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vbrev8.nxv2i32(
+    <vscale x 2 x i32> undef,
+    <vscale x 2 x i32> %0,
+    iXLen %1)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vbrev8.mask.nxv2i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  <vscale x 2 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 2 x i32> @intrinsic_vbrev8_mask_vs_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT:    vbrev8.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vbrev8.mask.nxv2i32(
+    <vscale x 2 x i32> %1,
+    <vscale x 2 x i32> %2,
+    <vscale x 2 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vbrev8.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  iXLen);
+
+define <vscale x 4 x i32> @intrinsic_vbrev8_vs_nxv4i32(<vscale x 4 x i32> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vbrev8_vs_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    vbrev8.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vbrev8.nxv4i32(
+    <vscale x 4 x i32> undef,
+    <vscale x 4 x i32> %0,
+    iXLen %1)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vbrev8.mask.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  <vscale x 4 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 4 x i32> @intrinsic_vbrev8_mask_vs_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT:    vbrev8.v v8, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vbrev8.mask.nxv4i32(
+    <vscale x 4 x i32> %1,
+    <vscale x 4 x i32> %2,
+    <vscale x 4 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vbrev8.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  iXLen);
+
+define <vscale x 8 x i32> @intrinsic_vbrev8_vs_nxv8i32(<vscale x 8 x i32> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vbrev8_vs_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    vbrev8.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vbrev8.nxv8i32(
+    <vscale x 8 x i32> undef,
+    <vscale x 8 x i32> %0,
+    iXLen %1)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vbrev8.mask.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  <vscale x 8 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 8 x i32> @intrinsic_vbrev8_mask_vs_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT:    vbrev8.v v8, v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vbrev8.mask.nxv8i32(
+    <vscale x 8 x i32> %1,
+    <vscale x 8 x i32> %2,
+    <vscale x 8 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vbrev8.nxv16i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  iXLen);
+
+define <vscale x 16 x i32> @intrinsic_vbrev8_vs_nxv16i32(<vscale x 16 x i32> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vbrev8_vs_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT:    vbrev8.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vbrev8.nxv16i32(
+    <vscale x 16 x i32> undef,
+    <vscale x 16 x i32> %0,
+    iXLen %1)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vbrev8.mask.nxv16i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  <vscale x 16 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 16 x i32> @intrinsic_vbrev8_mask_vs_nxv16i32(<vscale x 16 x i1> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT:    vbrev8.v v8, v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vbrev8.mask.nxv16i32(
+    <vscale x 16 x i32> %1,
+    <vscale x 16 x i32> %2,
+    <vscale x 16 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vbrev8.nxv1i64(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>,
+  iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vbrev8_vs_nxv1i64(<vscale x 1 x i64> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vbrev8_vs_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    vbrev8.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vbrev8.nxv1i64(
+    <vscale x 1 x i64> undef,
+    <vscale x 1 x i64> %0,
+    iXLen %1)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vbrev8.mask.nxv1i64(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vbrev8_mask_vs_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT:    vbrev8.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vbrev8.mask.nxv1i64(
+    <vscale x 1 x i64> %1,
+    <vscale x 1 x i64> %2,
+    <vscale x 1 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vbrev8.nxv2i64(
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>,
+  iXLen);
+
+define <vscale x 2 x i64> @intrinsic_vbrev8_vs_nxv2i64(<vscale x 2 x i64> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vbrev8_vs_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    vbrev8.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i64> @llvm.riscv.vbrev8.nxv2i64(
+    <vscale x 2 x i64> undef,
+    <vscale x 2 x i64> %0,
+    iXLen %1)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vbrev8.mask.nxv2i64(
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>,
+  <vscale x 2 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 2 x i64> @intrinsic_vbrev8_mask_vs_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT:    vbrev8.v v8, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i64> @llvm.riscv.vbrev8.mask.nxv2i64(
+    <vscale x 2 x i64> %1,
+    <vscale x 2 x i64> %2,
+    <vscale x 2 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vbrev8.nxv4i64(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vbrev8_vs_nxv4i64(<vscale x 4 x i64> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vbrev8_vs_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT:    vbrev8.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i64> @llvm.riscv.vbrev8.nxv4i64(
+    <vscale x 4 x i64> undef,
+    <vscale x 4 x i64> %0,
+    iXLen %1)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vbrev8.mask.nxv4i64(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  <vscale x 4 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vbrev8_mask_vs_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT:    vbrev8.v v8, v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i64> @llvm.riscv.vbrev8.mask.nxv4i64(
+    <vscale x 4 x i64> %1,
+    <vscale x 4 x i64> %2,
+    <vscale x 4 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vbrev8.nxv8i64(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>,
+  iXLen);
+
+define <vscale x 8 x i64> @intrinsic_vbrev8_vs_nxv8i64(<vscale x 8 x i64> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vbrev8_vs_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT:    vbrev8.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vbrev8.nxv8i64(
+    <vscale x 8 x i64> undef,
+    <vscale x 8 x i64> %0,
+    iXLen %1)
+
+  ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vbrev8.mask.nxv8i64(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>,
+  <vscale x 8 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 8 x i64> @intrinsic_vbrev8_mask_vs_nxv8i64(<vscale x 8 x i1> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT:    vbrev8.v v8, v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vbrev8.mask.nxv8i64(
+    <vscale x 8 x i64> %1,
+    <vscale x 8 x i64> %2,
+    <vscale x 8 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 8 x i64> %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vclmul.ll b/llvm/test/CodeGen/RISCV/rvv/vclmul.ll
new file mode 100644
index 00000000000000..4ed3bd813a3d74
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vclmul.ll
@@ -0,0 +1,478 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvbc \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvbc \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
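+;
+; The RUN lines above pre-process this file with sed so that the iXLen
+; placeholder becomes i32 when targeting riscv32 and i64 when targeting
+; riscv64, letting a single source exercise the Zvbc vclmul intrinsics for
+; both XLENs. Output that is identical on both targets is matched by the
+; shared CHECK prefix; target-specific lowering (e.g. the .vx forms, where
+; rv32 splats the 64-bit scalar through a stack slot with vlse64.v) is
+; matched by the RV32/RV64 prefixes.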
+
+declare <vscale x 1 x i64> @llvm.riscv.vclmul.nxv1i64.nxv1i64(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>,
+  iXLen)
+
+define <vscale x 1 x i64> @intrinsic_vclmul_vv_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vclmul_vv_nxv1i64_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    vclmul.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vclmul.nxv1i64.nxv1i64(
+    <vscale x 1 x i64> undef,
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64> %1,
+    iXLen %2)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vclmul.mask.nxv1i64.nxv1i64(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 1 x i64> @intrinsic_vclmul_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vclmul_mask_vv_nxv1i64_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT:    vclmul.vv v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vclmul.mask.nxv1i64.nxv1i64(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64> %1,
+    <vscale x 1 x i64> %2,
+    <vscale x 1 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vclmul.nxv2i64.nxv2i64(
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>,
+  iXLen)
+
+define <vscale x 2 x i64> @intrinsic_vclmul_vv_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vclmul_vv_nxv2i64_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    vclmul.vv v8, v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i64> @llvm.riscv.vclmul.nxv2i64.nxv2i64(
+    <vscale x 2 x i64> undef,
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64> %1,
+    iXLen %2)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vclmul.mask.nxv2i64.nxv2i64(
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>,
+  <vscale x 2 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 2 x i64> @intrinsic_vclmul_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vclmul_mask_vv_nxv2i64_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT:    vclmul.vv v8, v10, v12
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i64> @llvm.riscv.vclmul.mask.nxv2i64.nxv2i64(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64> %1,
+    <vscale x 2 x i64> %2,
+    <vscale x 2 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vclmul.nxv4i64.nxv4i64(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  iXLen)
+
+define <vscale x 4 x i64> @intrinsic_vclmul_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vclmul_vv_nxv4i64_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT:    vclmul.vv v8, v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i64> @llvm.riscv.vclmul.nxv4i64.nxv4i64(
+    <vscale x 4 x i64> undef,
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64> %1,
+    iXLen %2)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vclmul.mask.nxv4i64.nxv4i64(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  <vscale x 4 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 4 x i64> @intrinsic_vclmul_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vclmul_mask_vv_nxv4i64_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT:    vclmul.vv v8, v12, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i64> @llvm.riscv.vclmul.mask.nxv4i64.nxv4i64(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64> %1,
+    <vscale x 4 x i64> %2,
+    <vscale x 4 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vclmul.nxv8i64.nxv8i64(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>,
+  iXLen)
+
+define <vscale x 8 x i64> @intrinsic_vclmul_vv_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vclmul_vv_nxv8i64_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT:    vclmul.vv v8, v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vclmul.nxv8i64.nxv8i64(
+    <vscale x 8 x i64> undef,
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i64> %1,
+    iXLen %2)
+
+  ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vclmul.mask.nxv8i64.nxv8i64(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>,
+  <vscale x 8 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 8 x i64> @intrinsic_vclmul_mask_vv_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vclmul_mask_vv_nxv8i64_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vl8re64.v v24, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT:    vclmul.vv v8, v16, v24
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vclmul.mask.nxv8i64.nxv8i64(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i64> %1,
+    <vscale x 8 x i64> %2,
+    <vscale x 8 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vclmul.nxv1i64.i64(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>,
+  i64,
+  iXLen)
+
+define <vscale x 1 x i64> @intrinsic_vclmul_vx_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vclmul_vx_nxv1i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT:    vlse64.v v9, (a0), zero
+; RV32-NEXT:    vclmul.vv v8, v8, v9
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vclmul_vx_nxv1i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT:    vclmul.vx v8, v8, a0
+; RV64-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vclmul.nxv1i64.i64(
+    <vscale x 1 x i64> undef,
+    <vscale x 1 x i64> %0,
+    i64 %1,
+    iXLen %2)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vclmul.mask.nxv1i64.i64(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>,
+  i64,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 1 x i64> @intrinsic_vclmul_mask_vx_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vclmul_mask_vx_nxv1i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT:    vlse64.v v10, (a0), zero
+; RV32-NEXT:    vclmul.vv v8, v9, v10
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vclmul_mask_vx_nxv1i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT:    vclmul.vx v8, v9, a0
+; RV64-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vclmul.mask.nxv1i64.i64(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64> %1,
+    i64 %2,
+    <vscale x 1 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vclmul.nxv2i64.i64(
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>,
+  i64,
+  iXLen)
+
+define <vscale x 2 x i64> @intrinsic_vclmul_vx_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vclmul_vx_nxv2i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT:    vlse64.v v10, (a0), zero
+; RV32-NEXT:    vclmul.vv v8, v8, v10
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vclmul_vx_nxv2i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
+; RV64-NEXT:    vclmul.vx v8, v8, a0
+; RV64-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i64> @llvm.riscv.vclmul.nxv2i64.i64(
+    <vscale x 2 x i64> undef,
+    <vscale x 2 x i64> %0,
+    i64 %1,
+    iXLen %2)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vclmul.mask.nxv2i64.i64(
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>,
+  i64,
+  <vscale x 2 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 2 x i64> @intrinsic_vclmul_mask_vx_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vclmul_mask_vx_nxv2i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT:    vlse64.v v12, (a0), zero
+; RV32-NEXT:    vclmul.vv v8, v10, v12
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vclmul_mask_vx_nxv2i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT:    vclmul.vx v8, v10, a0
+; RV64-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i64> @llvm.riscv.vclmul.mask.nxv2i64.i64(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64> %1,
+    i64 %2,
+    <vscale x 2 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vclmul.nxv4i64.i64(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  i64,
+  iXLen)
+
+define <vscale x 4 x i64> @intrinsic_vclmul_vx_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vclmul_vx_nxv4i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT:    vlse64.v v12, (a0), zero
+; RV32-NEXT:    vclmul.vv v8, v8, v12
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vclmul_vx_nxv4i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
+; RV64-NEXT:    vclmul.vx v8, v8, a0
+; RV64-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i64> @llvm.riscv.vclmul.nxv4i64.i64(
+    <vscale x 4 x i64> undef,
+    <vscale x 4 x i64> %0,
+    i64 %1,
+    iXLen %2)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vclmul.mask.nxv4i64.i64(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  i64,
+  <vscale x 4 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 4 x i64> @intrinsic_vclmul_mask_vx_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vclmul_mask_vx_nxv4i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT:    vlse64.v v16, (a0), zero
+; RV32-NEXT:    vclmul.vv v8, v12, v16
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vclmul_mask_vx_nxv4i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT:    vclmul.vx v8, v12, a0
+; RV64-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i64> @llvm.riscv.vclmul.mask.nxv4i64.i64(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64> %1,
+    i64 %2,
+    <vscale x 4 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vclmul.nxv8i64.i64(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>,
+  i64,
+  iXLen)
+
+define <vscale x 8 x i64> @intrinsic_vclmul_vx_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vclmul_vx_nxv8i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT:    vlse64.v v16, (a0), zero
+; RV32-NEXT:    vclmul.vv v8, v8, v16
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vclmul_vx_nxv8i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; RV64-NEXT:    vclmul.vx v8, v8, a0
+; RV64-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vclmul.nxv8i64.i64(
+    <vscale x 8 x i64> undef,
+    <vscale x 8 x i64> %0,
+    i64 %1,
+    iXLen %2)
+
+  ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vclmul.mask.nxv8i64.i64(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>,
+  i64,
+  <vscale x 8 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 8 x i64> @intrinsic_vclmul_mask_vx_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vclmul_mask_vx_nxv8i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT:    vlse64.v v24, (a0), zero
+; RV32-NEXT:    vclmul.vv v8, v16, v24
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vclmul_mask_vx_nxv8i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT:    vclmul.vx v8, v16, a0
+; RV64-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vclmul.mask.nxv8i64.i64(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i64> %1,
+    i64 %2,
+    <vscale x 8 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 8 x i64> %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vclmulh.ll b/llvm/test/CodeGen/RISCV/rvv/vclmulh.ll
new file mode 100644
index 00000000000000..a7881841b83f1f
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vclmulh.ll
@@ -0,0 +1,478 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvbc \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvbc \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
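+;
+; As in vclmul.ll, sed rewrites the iXLen placeholder to the target XLEN
+; (i32 for riscv32, i64 for riscv64) so one file covers both targets; the
+; RV32/RV64 prefixes capture the target-specific lowering of the .vx forms.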
+
+declare <vscale x 1 x i64> @llvm.riscv.vclmulh.nxv1i64.nxv1i64(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>,
+  iXLen)
+
+define <vscale x 1 x i64> @intrinsic_vclmulh_vv_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vclmulh_vv_nxv1i64_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    vclmulh.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vclmulh.nxv1i64.nxv1i64(
+    <vscale x 1 x i64> undef,
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64> %1,
+    iXLen %2)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vclmulh.mask.nxv1i64.nxv1i64(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 1 x i64> @intrinsic_vclmulh_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vclmulh_mask_vv_nxv1i64_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT:    vclmulh.vv v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vclmulh.mask.nxv1i64.nxv1i64(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64> %1,
+    <vscale x 1 x i64> %2,
+    <vscale x 1 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vclmulh.nxv2i64.nxv2i64(
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>,
+  iXLen)
+
+define <vscale x 2 x i64> @intrinsic_vclmulh_vv_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vclmulh_vv_nxv2i64_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    vclmulh.vv v8, v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i64> @llvm.riscv.vclmulh.nxv2i64.nxv2i64(
+    <vscale x 2 x i64> undef,
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64> %1,
+    iXLen %2)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vclmulh.mask.nxv2i64.nxv2i64(
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>,
+  <vscale x 2 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 2 x i64> @intrinsic_vclmulh_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vclmulh_mask_vv_nxv2i64_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT:    vclmulh.vv v8, v10, v12
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i64> @llvm.riscv.vclmulh.mask.nxv2i64.nxv2i64(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64> %1,
+    <vscale x 2 x i64> %2,
+    <vscale x 2 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vclmulh.nxv4i64.nxv4i64(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  iXLen)
+
+define <vscale x 4 x i64> @intrinsic_vclmulh_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vclmulh_vv_nxv4i64_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT:    vclmulh.vv v8, v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i64> @llvm.riscv.vclmulh.nxv4i64.nxv4i64(
+    <vscale x 4 x i64> undef,
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64> %1,
+    iXLen %2)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vclmulh.mask.nxv4i64.nxv4i64(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  <vscale x 4 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 4 x i64> @intrinsic_vclmulh_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vclmulh_mask_vv_nxv4i64_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT:    vclmulh.vv v8, v12, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i64> @llvm.riscv.vclmulh.mask.nxv4i64.nxv4i64(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64> %1,
+    <vscale x 4 x i64> %2,
+    <vscale x 4 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vclmulh.nxv8i64.nxv8i64(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>,
+  iXLen)
+
+define <vscale x 8 x i64> @intrinsic_vclmulh_vv_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vclmulh_vv_nxv8i64_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT:    vclmulh.vv v8, v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vclmulh.nxv8i64.nxv8i64(
+    <vscale x 8 x i64> undef,
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i64> %1,
+    iXLen %2)
+
+  ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vclmulh.mask.nxv8i64.nxv8i64(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>,
+  <vscale x 8 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 8 x i64> @intrinsic_vclmulh_mask_vv_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vclmulh_mask_vv_nxv8i64_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vl8re64.v v24, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT:    vclmulh.vv v8, v16, v24
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vclmulh.mask.nxv8i64.nxv8i64(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i64> %1,
+    <vscale x 8 x i64> %2,
+    <vscale x 8 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vclmulh.nxv1i64.i64(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>,
+  i64,
+  iXLen)
+
+define <vscale x 1 x i64> @intrinsic_vclmulh_vx_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vclmulh_vx_nxv1i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT:    vlse64.v v9, (a0), zero
+; RV32-NEXT:    vclmulh.vv v8, v8, v9
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vclmulh_vx_nxv1i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT:    vclmulh.vx v8, v8, a0
+; RV64-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vclmulh.nxv1i64.i64(
+    <vscale x 1 x i64> undef,
+    <vscale x 1 x i64> %0,
+    i64 %1,
+    iXLen %2)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vclmulh.mask.nxv1i64.i64(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>,
+  i64,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 1 x i64> @intrinsic_vclmulh_mask_vx_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vclmulh_mask_vx_nxv1i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT:    vlse64.v v10, (a0), zero
+; RV32-NEXT:    vclmulh.vv v8, v9, v10
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vclmulh_mask_vx_nxv1i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT:    vclmulh.vx v8, v9, a0
+; RV64-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vclmulh.mask.nxv1i64.i64(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64> %1,
+    i64 %2,
+    <vscale x 1 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vclmulh.nxv2i64.i64(
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>,
+  i64,
+  iXLen)
+
+define <vscale x 2 x i64> @intrinsic_vclmulh_vx_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vclmulh_vx_nxv2i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT:    vlse64.v v10, (a0), zero
+; RV32-NEXT:    vclmulh.vv v8, v8, v10
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vclmulh_vx_nxv2i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
+; RV64-NEXT:    vclmulh.vx v8, v8, a0
+; RV64-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i64> @llvm.riscv.vclmulh.nxv2i64.i64(
+    <vscale x 2 x i64> undef,
+    <vscale x 2 x i64> %0,
+    i64 %1,
+    iXLen %2)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vclmulh.mask.nxv2i64.i64(
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>,
+  i64,
+  <vscale x 2 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 2 x i64> @intrinsic_vclmulh_mask_vx_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vclmulh_mask_vx_nxv2i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT:    vlse64.v v12, (a0), zero
+; RV32-NEXT:    vclmulh.vv v8, v10, v12
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vclmulh_mask_vx_nxv2i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT:    vclmulh.vx v8, v10, a0
+; RV64-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i64> @llvm.riscv.vclmulh.mask.nxv2i64.i64(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64> %1,
+    i64 %2,
+    <vscale x 2 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vclmulh.nxv4i64.i64(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  i64,
+  iXLen)
+
+define <vscale x 4 x i64> @intrinsic_vclmulh_vx_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vclmulh_vx_nxv4i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT:    vlse64.v v12, (a0), zero
+; RV32-NEXT:    vclmulh.vv v8, v8, v12
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vclmulh_vx_nxv4i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
+; RV64-NEXT:    vclmulh.vx v8, v8, a0
+; RV64-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i64> @llvm.riscv.vclmulh.nxv4i64.i64(
+    <vscale x 4 x i64> undef,
+    <vscale x 4 x i64> %0,
+    i64 %1,
+    iXLen %2)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vclmulh.mask.nxv4i64.i64(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  i64,
+  <vscale x 4 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 4 x i64> @intrinsic_vclmulh_mask_vx_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vclmulh_mask_vx_nxv4i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT:    vlse64.v v16, (a0), zero
+; RV32-NEXT:    vclmulh.vv v8, v12, v16
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vclmulh_mask_vx_nxv4i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT:    vclmulh.vx v8, v12, a0
+; RV64-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i64> @llvm.riscv.vclmulh.mask.nxv4i64.i64(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64> %1,
+    i64 %2,
+    <vscale x 4 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vclmulh.nxv8i64.i64(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>,
+  i64,
+  iXLen)
+
+define <vscale x 8 x i64> @intrinsic_vclmulh_vx_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vclmulh_vx_nxv8i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT:    vlse64.v v16, (a0), zero
+; RV32-NEXT:    vclmulh.vv v8, v8, v16
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vclmulh_vx_nxv8i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; RV64-NEXT:    vclmulh.vx v8, v8, a0
+; RV64-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vclmulh.nxv8i64.i64(
+    <vscale x 8 x i64> undef,
+    <vscale x 8 x i64> %0,
+    i64 %1,
+    iXLen %2)
+
+  ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vclmulh.mask.nxv8i64.i64(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>,
+  i64,
+  <vscale x 8 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 8 x i64> @intrinsic_vclmulh_mask_vx_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vclmulh_mask_vx_nxv8i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT:    vlse64.v v24, (a0), zero
+; RV32-NEXT:    vclmulh.vv v8, v16, v24
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vclmulh_mask_vx_nxv8i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT:    vclmulh.vx v8, v16, a0
+; RV64-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vclmulh.mask.nxv8i64.i64(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i64> %1,
+    i64 %2,
+    <vscale x 8 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 8 x i64> %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vclz.ll b/llvm/test/CodeGen/RISCV/rvv/vclz.ll
new file mode 100644
index 00000000000000..aaa760106e91c5
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vclz.ll
@@ -0,0 +1,951 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvbb \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvbb \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
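+;
+; sed rewrites the iXLen placeholder to i32 or i64 to match the target XLEN.
+; Both targets lower these Zvbb vclz intrinsics identically, so a single
+; shared CHECK prefix suffices for riscv32 and riscv64.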
+
+declare <vscale x 1 x i8> @llvm.riscv.vclz.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vclz_vs_nxv1i8(<vscale x 1 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vclz_vs_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT:    vclz.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vclz.nxv1i8(
+    <vscale x 1 x i8> undef,
+    <vscale x 1 x i8> %0,
+    iXLen %1)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vclz.mask.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vclz_mask_vs_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vclz_mask_vs_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT:    vclz.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vclz.mask.nxv1i8(
+    <vscale x 1 x i8> %1,
+    <vscale x 1 x i8> %2,
+    <vscale x 1 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vclz.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  iXLen);
+
+define <vscale x 2 x i8> @intrinsic_vclz_vs_nxv2i8(<vscale x 2 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vclz_vs_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT:    vclz.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.vclz.nxv2i8(
+    <vscale x 2 x i8> undef,
+    <vscale x 2 x i8> %0,
+    iXLen %1)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vclz.mask.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 2 x i8> @intrinsic_vclz_mask_vs_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vclz_mask_vs_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT:    vclz.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.vclz.mask.nxv2i8(
+    <vscale x 2 x i8> %1,
+    <vscale x 2 x i8> %2,
+    <vscale x 2 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vclz.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  iXLen);
+
+define <vscale x 4 x i8> @intrinsic_vclz_vs_nxv4i8(<vscale x 4 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vclz_vs_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT:    vclz.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.vclz.nxv4i8(
+    <vscale x 4 x i8> undef,
+    <vscale x 4 x i8> %0,
+    iXLen %1)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vclz.mask.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 4 x i8> @intrinsic_vclz_mask_vs_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vclz_mask_vs_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT:    vclz.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.vclz.mask.nxv4i8(
+    <vscale x 4 x i8> %1,
+    <vscale x 4 x i8> %2,
+    <vscale x 4 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vclz.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  iXLen);
+
+define <vscale x 8 x i8> @intrinsic_vclz_vs_nxv8i8(<vscale x 8 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vclz_vs_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT:    vclz.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.vclz.nxv8i8(
+    <vscale x 8 x i8> undef,
+    <vscale x 8 x i8> %0,
+    iXLen %1)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vclz.mask.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 8 x i8> @intrinsic_vclz_mask_vs_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vclz_mask_vs_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT:    vclz.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.vclz.mask.nxv8i8(
+    <vscale x 8 x i8> %1,
+    <vscale x 8 x i8> %2,
+    <vscale x 8 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vclz.nxv16i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  iXLen);
+
+define <vscale x 16 x i8> @intrinsic_vclz_vs_nxv16i8(<vscale x 16 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vclz_vs_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT:    vclz.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.vclz.nxv16i8(
+    <vscale x 16 x i8> undef,
+    <vscale x 16 x i8> %0,
+    iXLen %1)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vclz.mask.nxv16i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  <vscale x 16 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 16 x i8> @intrinsic_vclz_mask_vs_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vclz_mask_vs_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT:    vclz.v v8, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.vclz.mask.nxv16i8(
+    <vscale x 16 x i8> %1,
+    <vscale x 16 x i8> %2,
+    <vscale x 16 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vclz.nxv32i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  iXLen);
+
+define <vscale x 32 x i8> @intrinsic_vclz_vs_nxv32i8(<vscale x 32 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vclz_vs_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT:    vclz.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i8> @llvm.riscv.vclz.nxv32i8(
+    <vscale x 32 x i8> undef,
+    <vscale x 32 x i8> %0,
+    iXLen %1)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vclz.mask.nxv32i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  <vscale x 32 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 32 x i8> @intrinsic_vclz_mask_vs_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vclz_mask_vs_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT:    vclz.v v8, v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i8> @llvm.riscv.vclz.mask.nxv32i8(
+    <vscale x 32 x i8> %1,
+    <vscale x 32 x i8> %2,
+    <vscale x 32 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vclz.nxv64i8(
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  iXLen);
+
+define <vscale x 64 x i8> @intrinsic_vclz_vs_nxv64i8(<vscale x 64 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vclz_vs_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT:    vclz.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 64 x i8> @llvm.riscv.vclz.nxv64i8(
+    <vscale x 64 x i8> undef,
+    <vscale x 64 x i8> %0,
+    iXLen %1)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vclz.mask.nxv64i8(
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  <vscale x 64 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 64 x i8> @intrinsic_vclz_mask_vs_nxv64i8(<vscale x 64 x i1> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vclz_mask_vs_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
+; CHECK-NEXT:    vclz.v v8, v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 64 x i8> @llvm.riscv.vclz.mask.nxv64i8(
+    <vscale x 64 x i8> %1,
+    <vscale x 64 x i8> %2,
+    <vscale x 64 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vclz.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  iXLen);
+
+define <vscale x 1 x i16> @intrinsic_vclz_vs_nxv1i16(<vscale x 1 x i16> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vclz_vs_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    vclz.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.vclz.nxv1i16(
+    <vscale x 1 x i16> undef,
+    <vscale x 1 x i16> %0,
+    iXLen %1)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vclz.mask.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 1 x i16> @intrinsic_vclz_mask_vs_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vclz_mask_vs_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT:    vclz.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.vclz.mask.nxv1i16(
+    <vscale x 1 x i16> %1,
+    <vscale x 1 x i16> %2,
+    <vscale x 1 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vclz.nxv2i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  iXLen);
+
+define <vscale x 2 x i16> @intrinsic_vclz_vs_nxv2i16(<vscale x 2 x i16> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vclz_vs_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    vclz.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i16> @llvm.riscv.vclz.nxv2i16(
+    <vscale x 2 x i16> undef,
+    <vscale x 2 x i16> %0,
+    iXLen %1)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vclz.mask.nxv2i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  <vscale x 2 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 2 x i16> @intrinsic_vclz_mask_vs_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vclz_mask_vs_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT:    vclz.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i16> @llvm.riscv.vclz.mask.nxv2i16(
+    <vscale x 2 x i16> %1,
+    <vscale x 2 x i16> %2,
+    <vscale x 2 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vclz.nxv4i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  iXLen);
+
+define <vscale x 4 x i16> @intrinsic_vclz_vs_nxv4i16(<vscale x 4 x i16> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vclz_vs_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    vclz.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i16> @llvm.riscv.vclz.nxv4i16(
+    <vscale x 4 x i16> undef,
+    <vscale x 4 x i16> %0,
+    iXLen %1)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vclz.mask.nxv4i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  <vscale x 4 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 4 x i16> @intrinsic_vclz_mask_vs_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vclz_mask_vs_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT:    vclz.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i16> @llvm.riscv.vclz.mask.nxv4i16(
+    <vscale x 4 x i16> %1,
+    <vscale x 4 x i16> %2,
+    <vscale x 4 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vclz.nxv8i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  iXLen);
+
+define <vscale x 8 x i16> @intrinsic_vclz_vs_nxv8i16(<vscale x 8 x i16> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vclz_vs_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    vclz.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i16> @llvm.riscv.vclz.nxv8i16(
+    <vscale x 8 x i16> undef,
+    <vscale x 8 x i16> %0,
+    iXLen %1)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vclz.mask.nxv8i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  <vscale x 8 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 8 x i16> @intrinsic_vclz_mask_vs_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vclz_mask_vs_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT:    vclz.v v8, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i16> @llvm.riscv.vclz.mask.nxv8i16(
+    <vscale x 8 x i16> %1,
+    <vscale x 8 x i16> %2,
+    <vscale x 8 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vclz.nxv16i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  iXLen);
+
+define <vscale x 16 x i16> @intrinsic_vclz_vs_nxv16i16(<vscale x 16 x i16> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vclz_vs_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    vclz.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i16> @llvm.riscv.vclz.nxv16i16(
+    <vscale x 16 x i16> undef,
+    <vscale x 16 x i16> %0,
+    iXLen %1)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vclz.mask.nxv16i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  <vscale x 16 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 16 x i16> @intrinsic_vclz_mask_vs_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vclz_mask_vs_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT:    vclz.v v8, v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i16> @llvm.riscv.vclz.mask.nxv16i16(
+    <vscale x 16 x i16> %1,
+    <vscale x 16 x i16> %2,
+    <vscale x 16 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vclz.nxv32i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  iXLen);
+
+define <vscale x 32 x i16> @intrinsic_vclz_vs_nxv32i16(<vscale x 32 x i16> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vclz_vs_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT:    vclz.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i16> @llvm.riscv.vclz.nxv32i16(
+    <vscale x 32 x i16> undef,
+    <vscale x 32 x i16> %0,
+    iXLen %1)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vclz.mask.nxv32i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  <vscale x 32 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 32 x i16> @intrinsic_vclz_mask_vs_nxv32i16(<vscale x 32 x i1> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vclz_mask_vs_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT:    vclz.v v8, v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i16> @llvm.riscv.vclz.mask.nxv32i16(
+    <vscale x 32 x i16> %1,
+    <vscale x 32 x i16> %2,
+    <vscale x 32 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vclz.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  iXLen);
+
+define <vscale x 1 x i32> @intrinsic_vclz_vs_nxv1i32(<vscale x 1 x i32> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vclz_vs_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    vclz.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vclz.nxv1i32(
+    <vscale x 1 x i32> undef,
+    <vscale x 1 x i32> %0,
+    iXLen %1)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vclz.mask.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 1 x i32> @intrinsic_vclz_mask_vs_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vclz_mask_vs_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT:    vclz.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vclz.mask.nxv1i32(
+    <vscale x 1 x i32> %1,
+    <vscale x 1 x i32> %2,
+    <vscale x 1 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vclz.nxv2i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  iXLen);
+
+define <vscale x 2 x i32> @intrinsic_vclz_vs_nxv2i32(<vscale x 2 x i32> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vclz_vs_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    vclz.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vclz.nxv2i32(
+    <vscale x 2 x i32> undef,
+    <vscale x 2 x i32> %0,
+    iXLen %1)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vclz.mask.nxv2i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  <vscale x 2 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 2 x i32> @intrinsic_vclz_mask_vs_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vclz_mask_vs_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT:    vclz.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vclz.mask.nxv2i32(
+    <vscale x 2 x i32> %1,
+    <vscale x 2 x i32> %2,
+    <vscale x 2 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vclz.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  iXLen);
+
+define <vscale x 4 x i32> @intrinsic_vclz_vs_nxv4i32(<vscale x 4 x i32> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vclz_vs_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    vclz.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vclz.nxv4i32(
+    <vscale x 4 x i32> undef,
+    <vscale x 4 x i32> %0,
+    iXLen %1)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vclz.mask.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  <vscale x 4 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 4 x i32> @intrinsic_vclz_mask_vs_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vclz_mask_vs_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT:    vclz.v v8, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vclz.mask.nxv4i32(
+    <vscale x 4 x i32> %1,
+    <vscale x 4 x i32> %2,
+    <vscale x 4 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vclz.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  iXLen);
+
+define <vscale x 8 x i32> @intrinsic_vclz_vs_nxv8i32(<vscale x 8 x i32> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vclz_vs_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    vclz.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vclz.nxv8i32(
+    <vscale x 8 x i32> undef,
+    <vscale x 8 x i32> %0,
+    iXLen %1)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vclz.mask.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  <vscale x 8 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 8 x i32> @intrinsic_vclz_mask_vs_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vclz_mask_vs_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT:    vclz.v v8, v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vclz.mask.nxv8i32(
+    <vscale x 8 x i32> %1,
+    <vscale x 8 x i32> %2,
+    <vscale x 8 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vclz.nxv16i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  iXLen);
+
+define <vscale x 16 x i32> @intrinsic_vclz_vs_nxv16i32(<vscale x 16 x i32> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vclz_vs_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT:    vclz.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vclz.nxv16i32(
+    <vscale x 16 x i32> undef,
+    <vscale x 16 x i32> %0,
+    iXLen %1)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vclz.mask.nxv16i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  <vscale x 16 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 16 x i32> @intrinsic_vclz_mask_vs_nxv16i32(<vscale x 16 x i1> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vclz_mask_vs_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT:    vclz.v v8, v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vclz.mask.nxv16i32(
+    <vscale x 16 x i32> %1,
+    <vscale x 16 x i32> %2,
+    <vscale x 16 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vclz.nxv1i64(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>,
+  iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vclz_vs_nxv1i64(<vscale x 1 x i64> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vclz_vs_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    vclz.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vclz.nxv1i64(
+    <vscale x 1 x i64> undef,
+    <vscale x 1 x i64> %0,
+    iXLen %1)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vclz.mask.nxv1i64(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vclz_mask_vs_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vclz_mask_vs_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT:    vclz.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vclz.mask.nxv1i64(
+    <vscale x 1 x i64> %1,
+    <vscale x 1 x i64> %2,
+    <vscale x 1 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vclz.nxv2i64(
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>,
+  iXLen);
+
+define <vscale x 2 x i64> @intrinsic_vclz_vs_nxv2i64(<vscale x 2 x i64> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vclz_vs_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    vclz.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i64> @llvm.riscv.vclz.nxv2i64(
+    <vscale x 2 x i64> undef,
+    <vscale x 2 x i64> %0,
+    iXLen %1)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vclz.mask.nxv2i64(
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>,
+  <vscale x 2 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 2 x i64> @intrinsic_vclz_mask_vs_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vclz_mask_vs_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT:    vclz.v v8, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i64> @llvm.riscv.vclz.mask.nxv2i64(
+    <vscale x 2 x i64> %1,
+    <vscale x 2 x i64> %2,
+    <vscale x 2 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vclz.nxv4i64(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vclz_vs_nxv4i64(<vscale x 4 x i64> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vclz_vs_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT:    vclz.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i64> @llvm.riscv.vclz.nxv4i64(
+    <vscale x 4 x i64> undef,
+    <vscale x 4 x i64> %0,
+    iXLen %1)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vclz.mask.nxv4i64(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  <vscale x 4 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vclz_mask_vs_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vclz_mask_vs_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT:    vclz.v v8, v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i64> @llvm.riscv.vclz.mask.nxv4i64(
+    <vscale x 4 x i64> %1,
+    <vscale x 4 x i64> %2,
+    <vscale x 4 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vclz.nxv8i64(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>,
+  iXLen);
+
+define <vscale x 8 x i64> @intrinsic_vclz_vs_nxv8i64(<vscale x 8 x i64> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vclz_vs_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT:    vclz.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vclz.nxv8i64(
+    <vscale x 8 x i64> undef,
+    <vscale x 8 x i64> %0,
+    iXLen %1)
+
+  ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vclz.mask.nxv8i64(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>,
+  <vscale x 8 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 8 x i64> @intrinsic_vclz_mask_vs_nxv8i64(<vscale x 8 x i1> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vclz_mask_vs_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT:    vclz.v v8, v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vclz.mask.nxv8i64(
+    <vscale x 8 x i64> %1,
+    <vscale x 8 x i64> %2,
+    <vscale x 8 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 8 x i64> %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vcpopv.ll b/llvm/test/CodeGen/RISCV/rvv/vcpopv.ll
new file mode 100644
index 00000000000000..99dfc3be9a00c0
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vcpopv.ll
@@ -0,0 +1,951 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvbb \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvbb \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+
+declare <vscale x 1 x i8> @llvm.riscv.vcpopv.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vcpopv_vs_nxv1i8(<vscale x 1 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vcpopv_vs_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT:    vcpop.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vcpopv.nxv1i8(
+    <vscale x 1 x i8> undef,
+    <vscale x 1 x i8> %0,
+    iXLen %1)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vcpopv.mask.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vcpopv_mask_vs_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vcpopv_mask_vs_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT:    vcpop.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vcpopv.mask.nxv1i8(
+    <vscale x 1 x i8> %1,
+    <vscale x 1 x i8> %2,
+    <vscale x 1 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vcpopv.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  iXLen);
+
+define <vscale x 2 x i8> @intrinsic_vcpopv_vs_nxv2i8(<vscale x 2 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vcpopv_vs_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT:    vcpop.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.vcpopv.nxv2i8(
+    <vscale x 2 x i8> undef,
+    <vscale x 2 x i8> %0,
+    iXLen %1)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vcpopv.mask.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 2 x i8> @intrinsic_vcpopv_mask_vs_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vcpopv_mask_vs_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT:    vcpop.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.vcpopv.mask.nxv2i8(
+    <vscale x 2 x i8> %1,
+    <vscale x 2 x i8> %2,
+    <vscale x 2 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vcpopv.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  iXLen);
+
+define <vscale x 4 x i8> @intrinsic_vcpopv_vs_nxv4i8(<vscale x 4 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vcpopv_vs_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT:    vcpop.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.vcpopv.nxv4i8(
+    <vscale x 4 x i8> undef,
+    <vscale x 4 x i8> %0,
+    iXLen %1)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vcpopv.mask.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 4 x i8> @intrinsic_vcpopv_mask_vs_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vcpopv_mask_vs_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT:    vcpop.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.vcpopv.mask.nxv4i8(
+    <vscale x 4 x i8> %1,
+    <vscale x 4 x i8> %2,
+    <vscale x 4 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vcpopv.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  iXLen);
+
+define <vscale x 8 x i8> @intrinsic_vcpopv_vs_nxv8i8(<vscale x 8 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vcpopv_vs_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT:    vcpop.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.vcpopv.nxv8i8(
+    <vscale x 8 x i8> undef,
+    <vscale x 8 x i8> %0,
+    iXLen %1)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vcpopv.mask.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 8 x i8> @intrinsic_vcpopv_mask_vs_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vcpopv_mask_vs_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT:    vcpop.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.vcpopv.mask.nxv8i8(
+    <vscale x 8 x i8> %1,
+    <vscale x 8 x i8> %2,
+    <vscale x 8 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vcpopv.nxv16i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  iXLen);
+
+define <vscale x 16 x i8> @intrinsic_vcpopv_vs_nxv16i8(<vscale x 16 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vcpopv_vs_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT:    vcpop.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.vcpopv.nxv16i8(
+    <vscale x 16 x i8> undef,
+    <vscale x 16 x i8> %0,
+    iXLen %1)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vcpopv.mask.nxv16i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  <vscale x 16 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 16 x i8> @intrinsic_vcpopv_mask_vs_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vcpopv_mask_vs_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT:    vcpop.v v8, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.vcpopv.mask.nxv16i8(
+    <vscale x 16 x i8> %1,
+    <vscale x 16 x i8> %2,
+    <vscale x 16 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vcpopv.nxv32i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  iXLen);
+
+define <vscale x 32 x i8> @intrinsic_vcpopv_vs_nxv32i8(<vscale x 32 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vcpopv_vs_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT:    vcpop.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i8> @llvm.riscv.vcpopv.nxv32i8(
+    <vscale x 32 x i8> undef,
+    <vscale x 32 x i8> %0,
+    iXLen %1)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vcpopv.mask.nxv32i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  <vscale x 32 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 32 x i8> @intrinsic_vcpopv_mask_vs_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vcpopv_mask_vs_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT:    vcpop.v v8, v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i8> @llvm.riscv.vcpopv.mask.nxv32i8(
+    <vscale x 32 x i8> %1,
+    <vscale x 32 x i8> %2,
+    <vscale x 32 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vcpopv.nxv64i8(
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  iXLen);
+
+define <vscale x 64 x i8> @intrinsic_vcpopv_vs_nxv64i8(<vscale x 64 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vcpopv_vs_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT:    vcpop.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 64 x i8> @llvm.riscv.vcpopv.nxv64i8(
+    <vscale x 64 x i8> undef,
+    <vscale x 64 x i8> %0,
+    iXLen %1)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vcpopv.mask.nxv64i8(
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  <vscale x 64 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 64 x i8> @intrinsic_vcpopv_mask_vs_nxv64i8(<vscale x 64 x i1> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vcpopv_mask_vs_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
+; CHECK-NEXT:    vcpop.v v8, v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 64 x i8> @llvm.riscv.vcpopv.mask.nxv64i8(
+    <vscale x 64 x i8> %1,
+    <vscale x 64 x i8> %2,
+    <vscale x 64 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vcpopv.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  iXLen);
+
+define <vscale x 1 x i16> @intrinsic_vcpopv_vs_nxv1i16(<vscale x 1 x i16> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vcpopv_vs_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    vcpop.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.vcpopv.nxv1i16(
+    <vscale x 1 x i16> undef,
+    <vscale x 1 x i16> %0,
+    iXLen %1)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vcpopv.mask.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 1 x i16> @intrinsic_vcpopv_mask_vs_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vcpopv_mask_vs_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT:    vcpop.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.vcpopv.mask.nxv1i16(
+    <vscale x 1 x i16> %1,
+    <vscale x 1 x i16> %2,
+    <vscale x 1 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vcpopv.nxv2i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  iXLen);
+
+define <vscale x 2 x i16> @intrinsic_vcpopv_vs_nxv2i16(<vscale x 2 x i16> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vcpopv_vs_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    vcpop.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i16> @llvm.riscv.vcpopv.nxv2i16(
+    <vscale x 2 x i16> undef,
+    <vscale x 2 x i16> %0,
+    iXLen %1)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vcpopv.mask.nxv2i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  <vscale x 2 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 2 x i16> @intrinsic_vcpopv_mask_vs_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vcpopv_mask_vs_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT:    vcpop.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i16> @llvm.riscv.vcpopv.mask.nxv2i16(
+    <vscale x 2 x i16> %1,
+    <vscale x 2 x i16> %2,
+    <vscale x 2 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vcpopv.nxv4i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  iXLen);
+
+define <vscale x 4 x i16> @intrinsic_vcpopv_vs_nxv4i16(<vscale x 4 x i16> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vcpopv_vs_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    vcpop.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i16> @llvm.riscv.vcpopv.nxv4i16(
+    <vscale x 4 x i16> undef,
+    <vscale x 4 x i16> %0,
+    iXLen %1)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vcpopv.mask.nxv4i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  <vscale x 4 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 4 x i16> @intrinsic_vcpopv_mask_vs_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vcpopv_mask_vs_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT:    vcpop.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i16> @llvm.riscv.vcpopv.mask.nxv4i16(
+    <vscale x 4 x i16> %1,
+    <vscale x 4 x i16> %2,
+    <vscale x 4 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vcpopv.nxv8i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  iXLen);
+
+define <vscale x 8 x i16> @intrinsic_vcpopv_vs_nxv8i16(<vscale x 8 x i16> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vcpopv_vs_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    vcpop.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i16> @llvm.riscv.vcpopv.nxv8i16(
+    <vscale x 8 x i16> undef,
+    <vscale x 8 x i16> %0,
+    iXLen %1)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vcpopv.mask.nxv8i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  <vscale x 8 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 8 x i16> @intrinsic_vcpopv_mask_vs_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vcpopv_mask_vs_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT:    vcpop.v v8, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i16> @llvm.riscv.vcpopv.mask.nxv8i16(
+    <vscale x 8 x i16> %1,
+    <vscale x 8 x i16> %2,
+    <vscale x 8 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vcpopv.nxv16i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  iXLen);
+
+define <vscale x 16 x i16> @intrinsic_vcpopv_vs_nxv16i16(<vscale x 16 x i16> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vcpopv_vs_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    vcpop.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i16> @llvm.riscv.vcpopv.nxv16i16(
+    <vscale x 16 x i16> undef,
+    <vscale x 16 x i16> %0,
+    iXLen %1)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vcpopv.mask.nxv16i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  <vscale x 16 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 16 x i16> @intrinsic_vcpopv_mask_vs_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vcpopv_mask_vs_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT:    vcpop.v v8, v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i16> @llvm.riscv.vcpopv.mask.nxv16i16(
+    <vscale x 16 x i16> %1,
+    <vscale x 16 x i16> %2,
+    <vscale x 16 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vcpopv.nxv32i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  iXLen);
+
+define <vscale x 32 x i16> @intrinsic_vcpopv_vs_nxv32i16(<vscale x 32 x i16> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vcpopv_vs_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT:    vcpop.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i16> @llvm.riscv.vcpopv.nxv32i16(
+    <vscale x 32 x i16> undef,
+    <vscale x 32 x i16> %0,
+    iXLen %1)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vcpopv.mask.nxv32i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  <vscale x 32 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 32 x i16> @intrinsic_vcpopv_mask_vs_nxv32i16(<vscale x 32 x i1> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vcpopv_mask_vs_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT:    vcpop.v v8, v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i16> @llvm.riscv.vcpopv.mask.nxv32i16(
+    <vscale x 32 x i16> %1,
+    <vscale x 32 x i16> %2,
+    <vscale x 32 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vcpopv.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  iXLen);
+
+define <vscale x 1 x i32> @intrinsic_vcpopv_vs_nxv1i32(<vscale x 1 x i32> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vcpopv_vs_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    vcpop.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vcpopv.nxv1i32(
+    <vscale x 1 x i32> undef,
+    <vscale x 1 x i32> %0,
+    iXLen %1)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vcpopv.mask.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 1 x i32> @intrinsic_vcpopv_mask_vs_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vcpopv_mask_vs_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT:    vcpop.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vcpopv.mask.nxv1i32(
+    <vscale x 1 x i32> %1,
+    <vscale x 1 x i32> %2,
+    <vscale x 1 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vcpopv.nxv2i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  iXLen);
+
+define <vscale x 2 x i32> @intrinsic_vcpopv_vs_nxv2i32(<vscale x 2 x i32> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vcpopv_vs_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    vcpop.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vcpopv.nxv2i32(
+    <vscale x 2 x i32> undef,
+    <vscale x 2 x i32> %0,
+    iXLen %1)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vcpopv.mask.nxv2i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  <vscale x 2 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 2 x i32> @intrinsic_vcpopv_mask_vs_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vcpopv_mask_vs_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT:    vcpop.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vcpopv.mask.nxv2i32(
+    <vscale x 2 x i32> %1,
+    <vscale x 2 x i32> %2,
+    <vscale x 2 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vcpopv.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  iXLen);
+
+define <vscale x 4 x i32> @intrinsic_vcpopv_vs_nxv4i32(<vscale x 4 x i32> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vcpopv_vs_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    vcpop.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vcpopv.nxv4i32(
+    <vscale x 4 x i32> undef,
+    <vscale x 4 x i32> %0,
+    iXLen %1)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vcpopv.mask.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  <vscale x 4 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 4 x i32> @intrinsic_vcpopv_mask_vs_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vcpopv_mask_vs_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT:    vcpop.v v8, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vcpopv.mask.nxv4i32(
+    <vscale x 4 x i32> %1,
+    <vscale x 4 x i32> %2,
+    <vscale x 4 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vcpopv.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  iXLen);
+
+define <vscale x 8 x i32> @intrinsic_vcpopv_vs_nxv8i32(<vscale x 8 x i32> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vcpopv_vs_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    vcpop.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vcpopv.nxv8i32(
+    <vscale x 8 x i32> undef,
+    <vscale x 8 x i32> %0,
+    iXLen %1)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vcpopv.mask.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  <vscale x 8 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 8 x i32> @intrinsic_vcpopv_mask_vs_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vcpopv_mask_vs_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT:    vcpop.v v8, v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vcpopv.mask.nxv8i32(
+    <vscale x 8 x i32> %1,
+    <vscale x 8 x i32> %2,
+    <vscale x 8 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vcpopv.nxv16i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  iXLen);
+
+define <vscale x 16 x i32> @intrinsic_vcpopv_vs_nxv16i32(<vscale x 16 x i32> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vcpopv_vs_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT:    vcpop.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vcpopv.nxv16i32(
+    <vscale x 16 x i32> undef,
+    <vscale x 16 x i32> %0,
+    iXLen %1)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vcpopv.mask.nxv16i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  <vscale x 16 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 16 x i32> @intrinsic_vcpopv_mask_vs_nxv16i32(<vscale x 16 x i1> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vcpopv_mask_vs_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT:    vcpop.v v8, v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vcpopv.mask.nxv16i32(
+    <vscale x 16 x i32> %1,
+    <vscale x 16 x i32> %2,
+    <vscale x 16 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vcpopv.nxv1i64(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>,
+  iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vcpopv_vs_nxv1i64(<vscale x 1 x i64> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vcpopv_vs_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    vcpop.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vcpopv.nxv1i64(
+    <vscale x 1 x i64> undef,
+    <vscale x 1 x i64> %0,
+    iXLen %1)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vcpopv.mask.nxv1i64(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vcpopv_mask_vs_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vcpopv_mask_vs_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT:    vcpop.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vcpopv.mask.nxv1i64(
+    <vscale x 1 x i64> %1,
+    <vscale x 1 x i64> %2,
+    <vscale x 1 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vcpopv.nxv2i64(
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>,
+  iXLen);
+
+define <vscale x 2 x i64> @intrinsic_vcpopv_vs_nxv2i64(<vscale x 2 x i64> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vcpopv_vs_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    vcpop.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i64> @llvm.riscv.vcpopv.nxv2i64(
+    <vscale x 2 x i64> undef,
+    <vscale x 2 x i64> %0,
+    iXLen %1)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vcpopv.mask.nxv2i64(
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>,
+  <vscale x 2 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 2 x i64> @intrinsic_vcpopv_mask_vs_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vcpopv_mask_vs_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT:    vcpop.v v8, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i64> @llvm.riscv.vcpopv.mask.nxv2i64(
+    <vscale x 2 x i64> %1,
+    <vscale x 2 x i64> %2,
+    <vscale x 2 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vcpopv.nxv4i64(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vcpopv_vs_nxv4i64(<vscale x 4 x i64> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vcpopv_vs_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT:    vcpop.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i64> @llvm.riscv.vcpopv.nxv4i64(
+    <vscale x 4 x i64> undef,
+    <vscale x 4 x i64> %0,
+    iXLen %1)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vcpopv.mask.nxv4i64(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  <vscale x 4 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vcpopv_mask_vs_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vcpopv_mask_vs_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT:    vcpop.v v8, v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i64> @llvm.riscv.vcpopv.mask.nxv4i64(
+    <vscale x 4 x i64> %1,
+    <vscale x 4 x i64> %2,
+    <vscale x 4 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vcpopv.nxv8i64(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>,
+  iXLen);
+
+define <vscale x 8 x i64> @intrinsic_vcpopv_vs_nxv8i64(<vscale x 8 x i64> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vcpopv_vs_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT:    vcpop.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vcpopv.nxv8i64(
+    <vscale x 8 x i64> undef,
+    <vscale x 8 x i64> %0,
+    iXLen %1)
+
+  ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vcpopv.mask.nxv8i64(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>,
+  <vscale x 8 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 8 x i64> @intrinsic_vcpopv_mask_vs_nxv8i64(<vscale x 8 x i1> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vcpopv_mask_vs_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT:    vcpop.v v8, v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vcpopv.mask.nxv8i64(
+    <vscale x 8 x i64> %1,
+    <vscale x 8 x i64> %2,
+    <vscale x 8 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 8 x i64> %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vctz.ll b/llvm/test/CodeGen/RISCV/rvv/vctz.ll
new file mode 100644
index 00000000000000..763fac01481172
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vctz.ll
@@ -0,0 +1,951 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvbb \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvbb \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+
+declare <vscale x 1 x i8> @llvm.riscv.vctz.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vctz_vs_nxv1i8(<vscale x 1 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vctz_vs_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT:    vctz.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vctz.nxv1i8(
+    <vscale x 1 x i8> undef,
+    <vscale x 1 x i8> %0,
+    iXLen %1)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vctz.mask.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vctz_mask_vs_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT:    vctz.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vctz.mask.nxv1i8(
+    <vscale x 1 x i8> %1,
+    <vscale x 1 x i8> %2,
+    <vscale x 1 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vctz.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  iXLen);
+
+define <vscale x 2 x i8> @intrinsic_vctz_vs_nxv2i8(<vscale x 2 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vctz_vs_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT:    vctz.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.vctz.nxv2i8(
+    <vscale x 2 x i8> undef,
+    <vscale x 2 x i8> %0,
+    iXLen %1)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vctz.mask.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 2 x i8> @intrinsic_vctz_mask_vs_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT:    vctz.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.vctz.mask.nxv2i8(
+    <vscale x 2 x i8> %1,
+    <vscale x 2 x i8> %2,
+    <vscale x 2 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vctz.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  iXLen);
+
+define <vscale x 4 x i8> @intrinsic_vctz_vs_nxv4i8(<vscale x 4 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vctz_vs_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT:    vctz.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.vctz.nxv4i8(
+    <vscale x 4 x i8> undef,
+    <vscale x 4 x i8> %0,
+    iXLen %1)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vctz.mask.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 4 x i8> @intrinsic_vctz_mask_vs_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT:    vctz.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.vctz.mask.nxv4i8(
+    <vscale x 4 x i8> %1,
+    <vscale x 4 x i8> %2,
+    <vscale x 4 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vctz.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  iXLen);
+
+define <vscale x 8 x i8> @intrinsic_vctz_vs_nxv8i8(<vscale x 8 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vctz_vs_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT:    vctz.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.vctz.nxv8i8(
+    <vscale x 8 x i8> undef,
+    <vscale x 8 x i8> %0,
+    iXLen %1)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vctz.mask.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 8 x i8> @intrinsic_vctz_mask_vs_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT:    vctz.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.vctz.mask.nxv8i8(
+    <vscale x 8 x i8> %1,
+    <vscale x 8 x i8> %2,
+    <vscale x 8 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vctz.nxv16i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  iXLen);
+
+define <vscale x 16 x i8> @intrinsic_vctz_vs_nxv16i8(<vscale x 16 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vctz_vs_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT:    vctz.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.vctz.nxv16i8(
+    <vscale x 16 x i8> undef,
+    <vscale x 16 x i8> %0,
+    iXLen %1)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vctz.mask.nxv16i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  <vscale x 16 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 16 x i8> @intrinsic_vctz_mask_vs_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT:    vctz.v v8, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.vctz.mask.nxv16i8(
+    <vscale x 16 x i8> %1,
+    <vscale x 16 x i8> %2,
+    <vscale x 16 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vctz.nxv32i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  iXLen);
+
+define <vscale x 32 x i8> @intrinsic_vctz_vs_nxv32i8(<vscale x 32 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vctz_vs_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT:    vctz.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i8> @llvm.riscv.vctz.nxv32i8(
+    <vscale x 32 x i8> undef,
+    <vscale x 32 x i8> %0,
+    iXLen %1)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vctz.mask.nxv32i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  <vscale x 32 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 32 x i8> @intrinsic_vctz_mask_vs_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT:    vctz.v v8, v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i8> @llvm.riscv.vctz.mask.nxv32i8(
+    <vscale x 32 x i8> %1,
+    <vscale x 32 x i8> %2,
+    <vscale x 32 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vctz.nxv64i8(
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  iXLen);
+
+define <vscale x 64 x i8> @intrinsic_vctz_vs_nxv64i8(<vscale x 64 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vctz_vs_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT:    vctz.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 64 x i8> @llvm.riscv.vctz.nxv64i8(
+    <vscale x 64 x i8> undef,
+    <vscale x 64 x i8> %0,
+    iXLen %1)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vctz.mask.nxv64i8(
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  <vscale x 64 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 64 x i8> @intrinsic_vctz_mask_vs_nxv64i8(<vscale x 64 x i1> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
+; CHECK-NEXT:    vctz.v v8, v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 64 x i8> @llvm.riscv.vctz.mask.nxv64i8(
+    <vscale x 64 x i8> %1,
+    <vscale x 64 x i8> %2,
+    <vscale x 64 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vctz.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  iXLen);
+
+define <vscale x 1 x i16> @intrinsic_vctz_vs_nxv1i16(<vscale x 1 x i16> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vctz_vs_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    vctz.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.vctz.nxv1i16(
+    <vscale x 1 x i16> undef,
+    <vscale x 1 x i16> %0,
+    iXLen %1)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vctz.mask.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 1 x i16> @intrinsic_vctz_mask_vs_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT:    vctz.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.vctz.mask.nxv1i16(
+    <vscale x 1 x i16> %1,
+    <vscale x 1 x i16> %2,
+    <vscale x 1 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vctz.nxv2i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  iXLen);
+
+define <vscale x 2 x i16> @intrinsic_vctz_vs_nxv2i16(<vscale x 2 x i16> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vctz_vs_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    vctz.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i16> @llvm.riscv.vctz.nxv2i16(
+    <vscale x 2 x i16> undef,
+    <vscale x 2 x i16> %0,
+    iXLen %1)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vctz.mask.nxv2i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  <vscale x 2 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 2 x i16> @intrinsic_vctz_mask_vs_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT:    vctz.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i16> @llvm.riscv.vctz.mask.nxv2i16(
+    <vscale x 2 x i16> %1,
+    <vscale x 2 x i16> %2,
+    <vscale x 2 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vctz.nxv4i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  iXLen);
+
+define <vscale x 4 x i16> @intrinsic_vctz_vs_nxv4i16(<vscale x 4 x i16> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vctz_vs_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    vctz.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i16> @llvm.riscv.vctz.nxv4i16(
+    <vscale x 4 x i16> undef,
+    <vscale x 4 x i16> %0,
+    iXLen %1)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vctz.mask.nxv4i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  <vscale x 4 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 4 x i16> @intrinsic_vctz_mask_vs_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT:    vctz.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i16> @llvm.riscv.vctz.mask.nxv4i16(
+    <vscale x 4 x i16> %1,
+    <vscale x 4 x i16> %2,
+    <vscale x 4 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vctz.nxv8i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  iXLen);
+
+define <vscale x 8 x i16> @intrinsic_vctz_vs_nxv8i16(<vscale x 8 x i16> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vctz_vs_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    vctz.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i16> @llvm.riscv.vctz.nxv8i16(
+    <vscale x 8 x i16> undef,
+    <vscale x 8 x i16> %0,
+    iXLen %1)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vctz.mask.nxv8i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  <vscale x 8 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 8 x i16> @intrinsic_vctz_mask_vs_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT:    vctz.v v8, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i16> @llvm.riscv.vctz.mask.nxv8i16(
+    <vscale x 8 x i16> %1,
+    <vscale x 8 x i16> %2,
+    <vscale x 8 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vctz.nxv16i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  iXLen);
+
+define <vscale x 16 x i16> @intrinsic_vctz_vs_nxv16i16(<vscale x 16 x i16> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vctz_vs_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    vctz.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i16> @llvm.riscv.vctz.nxv16i16(
+    <vscale x 16 x i16> undef,
+    <vscale x 16 x i16> %0,
+    iXLen %1)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vctz.mask.nxv16i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  <vscale x 16 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 16 x i16> @intrinsic_vctz_mask_vs_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT:    vctz.v v8, v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i16> @llvm.riscv.vctz.mask.nxv16i16(
+    <vscale x 16 x i16> %1,
+    <vscale x 16 x i16> %2,
+    <vscale x 16 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vctz.nxv32i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  iXLen);
+
+define <vscale x 32 x i16> @intrinsic_vctz_vs_nxv32i16(<vscale x 32 x i16> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vctz_vs_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT:    vctz.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i16> @llvm.riscv.vctz.nxv32i16(
+    <vscale x 32 x i16> undef,
+    <vscale x 32 x i16> %0,
+    iXLen %1)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vctz.mask.nxv32i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  <vscale x 32 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 32 x i16> @intrinsic_vctz_mask_vs_nxv32i16(<vscale x 32 x i1> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT:    vctz.v v8, v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i16> @llvm.riscv.vctz.mask.nxv32i16(
+    <vscale x 32 x i16> %1,
+    <vscale x 32 x i16> %2,
+    <vscale x 32 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vctz.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  iXLen);
+
+define <vscale x 1 x i32> @intrinsic_vctz_vs_nxv1i32(<vscale x 1 x i32> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vctz_vs_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    vctz.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vctz.nxv1i32(
+    <vscale x 1 x i32> undef,
+    <vscale x 1 x i32> %0,
+    iXLen %1)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vctz.mask.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 1 x i32> @intrinsic_vctz_mask_vs_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT:    vctz.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vctz.mask.nxv1i32(
+    <vscale x 1 x i32> %1,
+    <vscale x 1 x i32> %2,
+    <vscale x 1 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vctz.nxv2i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  iXLen);
+
+define <vscale x 2 x i32> @intrinsic_vctz_vs_nxv2i32(<vscale x 2 x i32> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vctz_vs_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    vctz.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vctz.nxv2i32(
+    <vscale x 2 x i32> undef,
+    <vscale x 2 x i32> %0,
+    iXLen %1)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vctz.mask.nxv2i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  <vscale x 2 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 2 x i32> @intrinsic_vctz_mask_vs_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT:    vctz.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vctz.mask.nxv2i32(
+    <vscale x 2 x i32> %1,
+    <vscale x 2 x i32> %2,
+    <vscale x 2 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vctz.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  iXLen);
+
+define <vscale x 4 x i32> @intrinsic_vctz_vs_nxv4i32(<vscale x 4 x i32> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vctz_vs_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    vctz.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vctz.nxv4i32(
+    <vscale x 4 x i32> undef,
+    <vscale x 4 x i32> %0,
+    iXLen %1)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vctz.mask.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  <vscale x 4 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 4 x i32> @intrinsic_vctz_mask_vs_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT:    vctz.v v8, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vctz.mask.nxv4i32(
+    <vscale x 4 x i32> %1,
+    <vscale x 4 x i32> %2,
+    <vscale x 4 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vctz.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  iXLen);
+
+define <vscale x 8 x i32> @intrinsic_vctz_vs_nxv8i32(<vscale x 8 x i32> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vctz_vs_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    vctz.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vctz.nxv8i32(
+    <vscale x 8 x i32> undef,
+    <vscale x 8 x i32> %0,
+    iXLen %1)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vctz.mask.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  <vscale x 8 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 8 x i32> @intrinsic_vctz_mask_vs_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT:    vctz.v v8, v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vctz.mask.nxv8i32(
+    <vscale x 8 x i32> %1,
+    <vscale x 8 x i32> %2,
+    <vscale x 8 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vctz.nxv16i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  iXLen);
+
+define <vscale x 16 x i32> @intrinsic_vctz_vs_nxv16i32(<vscale x 16 x i32> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vctz_vs_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT:    vctz.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vctz.nxv16i32(
+    <vscale x 16 x i32> undef,
+    <vscale x 16 x i32> %0,
+    iXLen %1)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vctz.mask.nxv16i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  <vscale x 16 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 16 x i32> @intrinsic_vctz_mask_vs_nxv16i32(<vscale x 16 x i1> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT:    vctz.v v8, v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vctz.mask.nxv16i32(
+    <vscale x 16 x i32> %1,
+    <vscale x 16 x i32> %2,
+    <vscale x 16 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vctz.nxv1i64(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>,
+  iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vctz_vs_nxv1i64(<vscale x 1 x i64> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vctz_vs_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    vctz.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vctz.nxv1i64(
+    <vscale x 1 x i64> undef,
+    <vscale x 1 x i64> %0,
+    iXLen %1)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vctz.mask.nxv1i64(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vctz_mask_vs_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT:    vctz.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vctz.mask.nxv1i64(
+    <vscale x 1 x i64> %1,
+    <vscale x 1 x i64> %2,
+    <vscale x 1 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vctz.nxv2i64(
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>,
+  iXLen);
+
+define <vscale x 2 x i64> @intrinsic_vctz_vs_nxv2i64(<vscale x 2 x i64> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vctz_vs_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    vctz.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i64> @llvm.riscv.vctz.nxv2i64(
+    <vscale x 2 x i64> undef,
+    <vscale x 2 x i64> %0,
+    iXLen %1)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vctz.mask.nxv2i64(
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>,
+  <vscale x 2 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 2 x i64> @intrinsic_vctz_mask_vs_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT:    vctz.v v8, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i64> @llvm.riscv.vctz.mask.nxv2i64(
+    <vscale x 2 x i64> %1,
+    <vscale x 2 x i64> %2,
+    <vscale x 2 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vctz.nxv4i64(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vctz_vs_nxv4i64(<vscale x 4 x i64> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vctz_vs_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT:    vctz.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i64> @llvm.riscv.vctz.nxv4i64(
+    <vscale x 4 x i64> undef,
+    <vscale x 4 x i64> %0,
+    iXLen %1)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vctz.mask.nxv4i64(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  <vscale x 4 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vctz_mask_vs_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT:    vctz.v v8, v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i64> @llvm.riscv.vctz.mask.nxv4i64(
+    <vscale x 4 x i64> %1,
+    <vscale x 4 x i64> %2,
+    <vscale x 4 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vctz.nxv8i64(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>,
+  iXLen);
+
+define <vscale x 8 x i64> @intrinsic_vctz_vs_nxv8i64(<vscale x 8 x i64> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vctz_vs_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT:    vctz.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vctz.nxv8i64(
+    <vscale x 8 x i64> undef,
+    <vscale x 8 x i64> %0,
+    iXLen %1)
+
+  ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vctz.mask.nxv8i64(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>,
+  <vscale x 8 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 8 x i64> @intrinsic_vctz_mask_vs_nxv8i64(<vscale x 8 x i1> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT:    vctz.v v8, v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vctz.mask.nxv8i64(
+    <vscale x 8 x i64> %1,
+    <vscale x 8 x i64> %2,
+    <vscale x 8 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 8 x i64> %a
+}
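
For reference, the unmasked and masked forms above all feed vctz.v, which counts the trailing zero bits of every element; under Zvbb an all-zero element is defined to produce SEW. A minimal scalar sketch of that per-element behaviour in C follows — the helper name and sample values are illustrative only and not part of this patch:

#include <stdint.h>
#include <stdio.h>

/* Per-element model of vctz.v: trailing-zero count, with an all-zero
   element yielding SEW (here 8, matching the e8 tests above). */
static unsigned ctz_e8(uint8_t x) {
    if (x == 0)
        return 8;                      /* zero element -> SEW */
    return (unsigned)__builtin_ctz(x); /* defined for non-zero inputs */
}

int main(void) {
    uint8_t v[4] = {0x00, 0x01, 0x08, 0x80};
    for (int i = 0; i < 4; i++)
        printf("vctz(0x%02x) = %u\n", v[i], ctz_e8(v[i]));
    return 0;
}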

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vghsh.ll b/llvm/test/CodeGen/RISCV/rvv/vghsh.ll
new file mode 100644
index 00000000000000..977386dee8f524
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vghsh.ll
@@ -0,0 +1,126 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvkg \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvkg \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+
+declare <vscale x 1 x i32> @llvm.riscv.vghsh.nxv1i32.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  iXLen,
+  iXLen)
+
+define <vscale x 1 x i32> @intrinsic_vghsh_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vghsh_vv_nxv1i32_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    vghsh.vv v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vghsh.nxv1i32.nxv1i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    <vscale x 1 x i32> %2,
+    iXLen %3,
+    iXLen 2)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vghsh.nxv2i32.nxv2i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  iXLen,
+  iXLen)
+
+define <vscale x 2 x i32> @intrinsic_vghsh_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vghsh_vv_nxv2i32_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    vghsh.vv v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vghsh.nxv2i32.nxv2i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    <vscale x 2 x i32> %2,
+    iXLen %3,
+    iXLen 2)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vghsh.nxv4i32.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  iXLen,
+  iXLen)
+
+define <vscale x 4 x i32> @intrinsic_vghsh_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vghsh_vv_nxv4i32_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    vghsh.vv v8, v10, v12
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vghsh.nxv4i32.nxv4i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    <vscale x 4 x i32> %2,
+    iXLen %3,
+    iXLen 2)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vghsh.nxv8i32.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  iXLen,
+  iXLen)
+
+define <vscale x 8 x i32> @intrinsic_vghsh_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vghsh_vv_nxv8i32_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    vghsh.vv v8, v12, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vghsh.nxv8i32.nxv8i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    <vscale x 8 x i32> %2,
+    iXLen %3,
+    iXLen 2)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vghsh.nxv16i32.nxv16i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  iXLen,
+  iXLen)
+
+define <vscale x 16 x i32> @intrinsic_vghsh_vv_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vghsh_vv_nxv16i32_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vl8re32.v v24, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, ma
+; CHECK-NEXT:    vghsh.vv v8, v16, v24
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vghsh.nxv16i32.nxv16i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    <vscale x 16 x i32> %2,
+    iXLen %3,
+    iXLen 2)
+
+  ret <vscale x 16 x i32> %a
+}

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vgmul.ll b/llvm/test/CodeGen/RISCV/rvv/vgmul.ll
new file mode 100644
index 00000000000000..d4ce44d9eec2be
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vgmul.ll
@@ -0,0 +1,105 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvkg \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvkg \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+
+declare <vscale x 1 x i32> @llvm.riscv.vgmul.vv.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  iXLen, iXLen);
+
+define <vscale x 1 x i32> @intrinsic_vgmul_vs_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vgmul_vs_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    vgmul.vv v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vgmul.vv.nxv1i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    iXLen %2, iXLen 2)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vgmul.vv.nxv2i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  iXLen, iXLen);
+
+define <vscale x 2 x i32> @intrinsic_vgmul_vs_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vgmul_vs_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    vgmul.vv v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vgmul.vv.nxv2i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    iXLen %2, iXLen 2)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vgmul.vv.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  iXLen, iXLen);
+
+define <vscale x 4 x i32> @intrinsic_vgmul_vs_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vgmul_vs_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    vgmul.vv v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vgmul.vv.nxv4i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    iXLen %2, iXLen 2)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vgmul.vv.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  iXLen, iXLen);
+
+define <vscale x 8 x i32> @intrinsic_vgmul_vs_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vgmul_vs_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    vgmul.vv v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vgmul.vv.nxv8i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    iXLen %2, iXLen 2)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vgmul.vv.nxv16i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  iXLen, iXLen);
+
+define <vscale x 16 x i32> @intrinsic_vgmul_vs_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vgmul_vs_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT:    vgmul.vv v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vgmul.vv.nxv16i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    iXLen %2, iXLen 2)
+
+  ret <vscale x 16 x i32> %a
+}
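
A note on the trailing policy immediate in these calls: the checks above show that passing iXLen 2 (vghsh/vgmul) selects a "tu, ma" vsetvli, while the masked unary tests that pass iXLen 1 select "ta, mu". That is consistent with treating the policy operand as a two-bit mask (bit 0 = tail agnostic, bit 1 = mask agnostic). A tiny C sketch of that decoding — purely illustrative; the enum names are assumptions, not code from this patch:

#include <stdio.h>

/* Assumed encoding of the vector policy immediate: bit 0 = tail agnostic,
   bit 1 = mask agnostic; a clear bit means "undisturbed". */
enum { TAIL_AGNOSTIC = 1, MASK_AGNOSTIC = 2 };

static void print_policy(unsigned p) {
    printf("policy %u -> %s, %s\n", p,
           (p & TAIL_AGNOSTIC) ? "ta" : "tu",
           (p & MASK_AGNOSTIC) ? "ma" : "mu");
}

int main(void) {
    print_policy(2); /* vghsh/vgmul tests: tu, ma */
    print_policy(1); /* masked vctz/vrev8 tests: ta, mu */
    return 0;
}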

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vrev8.ll b/llvm/test/CodeGen/RISCV/rvv/vrev8.ll
new file mode 100644
index 00000000000000..ee9716c4cd60de
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vrev8.ll
@@ -0,0 +1,951 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvbb \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvbb \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+
+declare <vscale x 1 x i8> @llvm.riscv.vrev8.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vrev8_vs_nxv1i8(<vscale x 1 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vrev8_vs_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT:    vrev8.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vrev8.nxv1i8(
+    <vscale x 1 x i8> undef,
+    <vscale x 1 x i8> %0,
+    iXLen %1)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vrev8.mask.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vrev8_mask_vs_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT:    vrev8.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vrev8.mask.nxv1i8(
+    <vscale x 1 x i8> %1,
+    <vscale x 1 x i8> %2,
+    <vscale x 1 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vrev8.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  iXLen);
+
+define <vscale x 2 x i8> @intrinsic_vrev8_vs_nxv2i8(<vscale x 2 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vrev8_vs_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT:    vrev8.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.vrev8.nxv2i8(
+    <vscale x 2 x i8> undef,
+    <vscale x 2 x i8> %0,
+    iXLen %1)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vrev8.mask.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 2 x i8> @intrinsic_vrev8_mask_vs_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT:    vrev8.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.vrev8.mask.nxv2i8(
+    <vscale x 2 x i8> %1,
+    <vscale x 2 x i8> %2,
+    <vscale x 2 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vrev8.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  iXLen);
+
+define <vscale x 4 x i8> @intrinsic_vrev8_vs_nxv4i8(<vscale x 4 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vrev8_vs_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT:    vrev8.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.vrev8.nxv4i8(
+    <vscale x 4 x i8> undef,
+    <vscale x 4 x i8> %0,
+    iXLen %1)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vrev8.mask.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 4 x i8> @intrinsic_vrev8_mask_vs_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT:    vrev8.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.vrev8.mask.nxv4i8(
+    <vscale x 4 x i8> %1,
+    <vscale x 4 x i8> %2,
+    <vscale x 4 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vrev8.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  iXLen);
+
+define <vscale x 8 x i8> @intrinsic_vrev8_vs_nxv8i8(<vscale x 8 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vrev8_vs_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT:    vrev8.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.vrev8.nxv8i8(
+    <vscale x 8 x i8> undef,
+    <vscale x 8 x i8> %0,
+    iXLen %1)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vrev8.mask.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 8 x i8> @intrinsic_vrev8_mask_vs_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT:    vrev8.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.vrev8.mask.nxv8i8(
+    <vscale x 8 x i8> %1,
+    <vscale x 8 x i8> %2,
+    <vscale x 8 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vrev8.nxv16i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  iXLen);
+
+define <vscale x 16 x i8> @intrinsic_vrev8_vs_nxv16i8(<vscale x 16 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vrev8_vs_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT:    vrev8.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.vrev8.nxv16i8(
+    <vscale x 16 x i8> undef,
+    <vscale x 16 x i8> %0,
+    iXLen %1)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vrev8.mask.nxv16i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  <vscale x 16 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 16 x i8> @intrinsic_vrev8_mask_vs_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT:    vrev8.v v8, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.vrev8.mask.nxv16i8(
+    <vscale x 16 x i8> %1,
+    <vscale x 16 x i8> %2,
+    <vscale x 16 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vrev8.nxv32i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  iXLen);
+
+define <vscale x 32 x i8> @intrinsic_vrev8_vs_nxv32i8(<vscale x 32 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vrev8_vs_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT:    vrev8.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i8> @llvm.riscv.vrev8.nxv32i8(
+    <vscale x 32 x i8> undef,
+    <vscale x 32 x i8> %0,
+    iXLen %1)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vrev8.mask.nxv32i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  <vscale x 32 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 32 x i8> @intrinsic_vrev8_mask_vs_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT:    vrev8.v v8, v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i8> @llvm.riscv.vrev8.mask.nxv32i8(
+    <vscale x 32 x i8> %1,
+    <vscale x 32 x i8> %2,
+    <vscale x 32 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vrev8.nxv64i8(
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  iXLen);
+
+define <vscale x 64 x i8> @intrinsic_vrev8_vs_nxv64i8(<vscale x 64 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vrev8_vs_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT:    vrev8.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 64 x i8> @llvm.riscv.vrev8.nxv64i8(
+    <vscale x 64 x i8> undef,
+    <vscale x 64 x i8> %0,
+    iXLen %1)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vrev8.mask.nxv64i8(
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  <vscale x 64 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 64 x i8> @intrinsic_vrev8_mask_vs_nxv64i8(<vscale x 64 x i1> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
+; CHECK-NEXT:    vrev8.v v8, v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 64 x i8> @llvm.riscv.vrev8.mask.nxv64i8(
+    <vscale x 64 x i8> %1,
+    <vscale x 64 x i8> %2,
+    <vscale x 64 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vrev8.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  iXLen);
+
+define <vscale x 1 x i16> @intrinsic_vrev8_vs_nxv1i16(<vscale x 1 x i16> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vrev8_vs_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    vrev8.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.vrev8.nxv1i16(
+    <vscale x 1 x i16> undef,
+    <vscale x 1 x i16> %0,
+    iXLen %1)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vrev8.mask.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 1 x i16> @intrinsic_vrev8_mask_vs_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT:    vrev8.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.vrev8.mask.nxv1i16(
+    <vscale x 1 x i16> %1,
+    <vscale x 1 x i16> %2,
+    <vscale x 1 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vrev8.nxv2i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  iXLen);
+
+define <vscale x 2 x i16> @intrinsic_vrev8_vs_nxv2i16(<vscale x 2 x i16> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vrev8_vs_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    vrev8.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i16> @llvm.riscv.vrev8.nxv2i16(
+    <vscale x 2 x i16> undef,
+    <vscale x 2 x i16> %0,
+    iXLen %1)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vrev8.mask.nxv2i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  <vscale x 2 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 2 x i16> @intrinsic_vrev8_mask_vs_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT:    vrev8.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i16> @llvm.riscv.vrev8.mask.nxv2i16(
+    <vscale x 2 x i16> %1,
+    <vscale x 2 x i16> %2,
+    <vscale x 2 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vrev8.nxv4i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  iXLen);
+
+define <vscale x 4 x i16> @intrinsic_vrev8_vs_nxv4i16(<vscale x 4 x i16> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vrev8_vs_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    vrev8.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i16> @llvm.riscv.vrev8.nxv4i16(
+    <vscale x 4 x i16> undef,
+    <vscale x 4 x i16> %0,
+    iXLen %1)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vrev8.mask.nxv4i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  <vscale x 4 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 4 x i16> @intrinsic_vrev8_mask_vs_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT:    vrev8.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i16> @llvm.riscv.vrev8.mask.nxv4i16(
+    <vscale x 4 x i16> %1,
+    <vscale x 4 x i16> %2,
+    <vscale x 4 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vrev8.nxv8i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  iXLen);
+
+define <vscale x 8 x i16> @intrinsic_vrev8_vs_nxv8i16(<vscale x 8 x i16> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vrev8_vs_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    vrev8.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i16> @llvm.riscv.vrev8.nxv8i16(
+    <vscale x 8 x i16> undef,
+    <vscale x 8 x i16> %0,
+    iXLen %1)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vrev8.mask.nxv8i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  <vscale x 8 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 8 x i16> @intrinsic_vrev8_mask_vs_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT:    vrev8.v v8, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i16> @llvm.riscv.vrev8.mask.nxv8i16(
+    <vscale x 8 x i16> %1,
+    <vscale x 8 x i16> %2,
+    <vscale x 8 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vrev8.nxv16i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  iXLen);
+
+define <vscale x 16 x i16> @intrinsic_vrev8_vs_nxv16i16(<vscale x 16 x i16> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vrev8_vs_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    vrev8.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i16> @llvm.riscv.vrev8.nxv16i16(
+    <vscale x 16 x i16> undef,
+    <vscale x 16 x i16> %0,
+    iXLen %1)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vrev8.mask.nxv16i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  <vscale x 16 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 16 x i16> @intrinsic_vrev8_mask_vs_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT:    vrev8.v v8, v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i16> @llvm.riscv.vrev8.mask.nxv16i16(
+    <vscale x 16 x i16> %1,
+    <vscale x 16 x i16> %2,
+    <vscale x 16 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vrev8.nxv32i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  iXLen);
+
+define <vscale x 32 x i16> @intrinsic_vrev8_vs_nxv32i16(<vscale x 32 x i16> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vrev8_vs_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT:    vrev8.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i16> @llvm.riscv.vrev8.nxv32i16(
+    <vscale x 32 x i16> undef,
+    <vscale x 32 x i16> %0,
+    iXLen %1)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vrev8.mask.nxv32i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  <vscale x 32 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 32 x i16> @intrinsic_vrev8_mask_vs_nxv32i16(<vscale x 32 x i1> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT:    vrev8.v v8, v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i16> @llvm.riscv.vrev8.mask.nxv32i16(
+    <vscale x 32 x i16> %1,
+    <vscale x 32 x i16> %2,
+    <vscale x 32 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vrev8.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  iXLen);
+
+define <vscale x 1 x i32> @intrinsic_vrev8_vs_nxv1i32(<vscale x 1 x i32> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vrev8_vs_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    vrev8.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vrev8.nxv1i32(
+    <vscale x 1 x i32> undef,
+    <vscale x 1 x i32> %0,
+    iXLen %1)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vrev8.mask.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 1 x i32> @intrinsic_vrev8_mask_vs_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT:    vrev8.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vrev8.mask.nxv1i32(
+    <vscale x 1 x i32> %1,
+    <vscale x 1 x i32> %2,
+    <vscale x 1 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vrev8.nxv2i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  iXLen);
+
+define <vscale x 2 x i32> @intrinsic_vrev8_vs_nxv2i32(<vscale x 2 x i32> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vrev8_vs_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    vrev8.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vrev8.nxv2i32(
+    <vscale x 2 x i32> undef,
+    <vscale x 2 x i32> %0,
+    iXLen %1)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vrev8.mask.nxv2i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  <vscale x 2 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 2 x i32> @intrinsic_vrev8_mask_vs_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT:    vrev8.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vrev8.mask.nxv2i32(
+    <vscale x 2 x i32> %1,
+    <vscale x 2 x i32> %2,
+    <vscale x 2 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vrev8.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  iXLen);
+
+define <vscale x 4 x i32> @intrinsic_vrev8_vs_nxv4i32(<vscale x 4 x i32> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vrev8_vs_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    vrev8.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vrev8.nxv4i32(
+    <vscale x 4 x i32> undef,
+    <vscale x 4 x i32> %0,
+    iXLen %1)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vrev8.mask.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  <vscale x 4 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 4 x i32> @intrinsic_vrev8_mask_vs_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT:    vrev8.v v8, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vrev8.mask.nxv4i32(
+    <vscale x 4 x i32> %1,
+    <vscale x 4 x i32> %2,
+    <vscale x 4 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vrev8.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  iXLen);
+
+define <vscale x 8 x i32> @intrinsic_vrev8_vs_nxv8i32(<vscale x 8 x i32> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vrev8_vs_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    vrev8.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vrev8.nxv8i32(
+    <vscale x 8 x i32> undef,
+    <vscale x 8 x i32> %0,
+    iXLen %1)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vrev8.mask.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  <vscale x 8 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 8 x i32> @intrinsic_vrev8_mask_vs_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT:    vrev8.v v8, v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vrev8.mask.nxv8i32(
+    <vscale x 8 x i32> %1,
+    <vscale x 8 x i32> %2,
+    <vscale x 8 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vrev8.nxv16i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  iXLen);
+
+define <vscale x 16 x i32> @intrinsic_vrev8_vs_nxv16i32(<vscale x 16 x i32> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vrev8_vs_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT:    vrev8.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vrev8.nxv16i32(
+    <vscale x 16 x i32> undef,
+    <vscale x 16 x i32> %0,
+    iXLen %1)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vrev8.mask.nxv16i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  <vscale x 16 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 16 x i32> @intrinsic_vrev8_mask_vs_nxv16i32(<vscale x 16 x i1> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT:    vrev8.v v8, v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vrev8.mask.nxv16i32(
+    <vscale x 16 x i32> %1,
+    <vscale x 16 x i32> %2,
+    <vscale x 16 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vrev8.nxv1i64(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>,
+  iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vrev8_vs_nxv1i64(<vscale x 1 x i64> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vrev8_vs_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    vrev8.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vrev8.nxv1i64(
+    <vscale x 1 x i64> undef,
+    <vscale x 1 x i64> %0,
+    iXLen %1)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vrev8.mask.nxv1i64(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vrev8_mask_vs_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT:    vrev8.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vrev8.mask.nxv1i64(
+    <vscale x 1 x i64> %1,
+    <vscale x 1 x i64> %2,
+    <vscale x 1 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vrev8.nxv2i64(
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>,
+  iXLen);
+
+define <vscale x 2 x i64> @intrinsic_vrev8_vs_nxv2i64(<vscale x 2 x i64> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vrev8_vs_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    vrev8.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i64> @llvm.riscv.vrev8.nxv2i64(
+    <vscale x 2 x i64> undef,
+    <vscale x 2 x i64> %0,
+    iXLen %1)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vrev8.mask.nxv2i64(
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>,
+  <vscale x 2 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 2 x i64> @intrinsic_vrev8_mask_vs_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT:    vrev8.v v8, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i64> @llvm.riscv.vrev8.mask.nxv2i64(
+    <vscale x 2 x i64> %1,
+    <vscale x 2 x i64> %2,
+    <vscale x 2 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vrev8.nxv4i64(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vrev8_vs_nxv4i64(<vscale x 4 x i64> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vrev8_vs_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT:    vrev8.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i64> @llvm.riscv.vrev8.nxv4i64(
+    <vscale x 4 x i64> undef,
+    <vscale x 4 x i64> %0,
+    iXLen %1)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vrev8.mask.nxv4i64(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  <vscale x 4 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vrev8_mask_vs_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT:    vrev8.v v8, v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i64> @llvm.riscv.vrev8.mask.nxv4i64(
+    <vscale x 4 x i64> %1,
+    <vscale x 4 x i64> %2,
+    <vscale x 4 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vrev8.nxv8i64(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>,
+  iXLen);
+
+define <vscale x 8 x i64> @intrinsic_vrev8_vs_nxv8i64(<vscale x 8 x i64> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vrev8_vs_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT:    vrev8.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vrev8.nxv8i64(
+    <vscale x 8 x i64> undef,
+    <vscale x 8 x i64> %0,
+    iXLen %1)
+
+  ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vrev8.mask.nxv8i64(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>,
+  <vscale x 8 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 8 x i64> @intrinsic_vrev8_mask_vs_nxv8i64(<vscale x 8 x i1> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT:    vrev8.v v8, v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vrev8.mask.nxv8i64(
+    <vscale x 8 x i64> %1,
+    <vscale x 8 x i64> %2,
+    <vscale x 8 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 8 x i64> %a
+}

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vrol.ll b/llvm/test/CodeGen/RISCV/rvv/vrol.ll
new file mode 100644
index 00000000000000..0c446045de0b3c
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vrol.ll
@@ -0,0 +1,2803 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvbb \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvbb \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+
+declare <vscale x 1 x i8> @llvm.riscv.vrol.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  iXLen)
+
+define <vscale x 1 x i8> @intrinsic_vrol_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vrol_vv_nxv1i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT:    vrol.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vrol.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    iXLen %2)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vrol.mask.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 1 x i8> @intrinsic_vrol_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv1i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT:    vrol.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vrol.mask.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    <vscale x 1 x i8> %2,
+    <vscale x 1 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vrol.nxv2i8.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  iXLen)
+
+define <vscale x 2 x i8> @intrinsic_vrol_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vrol_vv_nxv2i8_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT:    vrol.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.vrol.nxv2i8.nxv2i8(
+    <vscale x 2 x i8> undef,
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    iXLen %2)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vrol.mask.nxv2i8.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 2 x i8> @intrinsic_vrol_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv2i8_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT:    vrol.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.vrol.mask.nxv2i8.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    <vscale x 2 x i8> %2,
+    <vscale x 2 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vrol.nxv4i8.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  iXLen)
+
+define <vscale x 4 x i8> @intrinsic_vrol_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vrol_vv_nxv4i8_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT:    vrol.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.vrol.nxv4i8.nxv4i8(
+    <vscale x 4 x i8> undef,
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    iXLen %2)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vrol.mask.nxv4i8.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 4 x i8> @intrinsic_vrol_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv4i8_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT:    vrol.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.vrol.mask.nxv4i8.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    <vscale x 4 x i8> %2,
+    <vscale x 4 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vrol.nxv8i8.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  iXLen)
+
+define <vscale x 8 x i8> @intrinsic_vrol_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vrol_vv_nxv8i8_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT:    vrol.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.vrol.nxv8i8.nxv8i8(
+    <vscale x 8 x i8> undef,
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    iXLen %2)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vrol.mask.nxv8i8.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 8 x i8> @intrinsic_vrol_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv8i8_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT:    vrol.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.vrol.mask.nxv8i8.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    <vscale x 8 x i8> %2,
+    <vscale x 8 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vrol.nxv16i8.nxv16i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  iXLen)
+
+define <vscale x 16 x i8> @intrinsic_vrol_vv_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vrol_vv_nxv16i8_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT:    vrol.vv v8, v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.vrol.nxv16i8.nxv16i8(
+    <vscale x 16 x i8> undef,
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    iXLen %2)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vrol.mask.nxv16i8.nxv16i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  <vscale x 16 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 16 x i8> @intrinsic_vrol_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv16i8_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT:    vrol.vv v8, v10, v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.vrol.mask.nxv16i8.nxv16i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    <vscale x 16 x i8> %2,
+    <vscale x 16 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vrol.nxv32i8.nxv32i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  iXLen)
+
+define <vscale x 32 x i8> @intrinsic_vrol_vv_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vrol_vv_nxv32i8_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT:    vrol.vv v8, v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i8> @llvm.riscv.vrol.nxv32i8.nxv32i8(
+    <vscale x 32 x i8> undef,
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8> %1,
+    iXLen %2)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vrol.mask.nxv32i8.nxv32i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  <vscale x 32 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 32 x i8> @intrinsic_vrol_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv32i8_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT:    vrol.vv v8, v12, v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i8> @llvm.riscv.vrol.mask.nxv32i8.nxv32i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8> %1,
+    <vscale x 32 x i8> %2,
+    <vscale x 32 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vrol.nxv64i8.nxv64i8(
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  iXLen)
+
+define <vscale x 64 x i8> @intrinsic_vrol_vv_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vrol_vv_nxv64i8_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT:    vrol.vv v8, v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 64 x i8> @llvm.riscv.vrol.nxv64i8.nxv64i8(
+    <vscale x 64 x i8> undef,
+    <vscale x 64 x i8> %0,
+    <vscale x 64 x i8> %1,
+    iXLen %2)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vrol.mask.nxv64i8.nxv64i8(
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  <vscale x 64 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 64 x i8> @intrinsic_vrol_mask_vv_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv64i8_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vl8r.v v24, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
+; CHECK-NEXT:    vrol.vv v8, v16, v24, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 64 x i8> @llvm.riscv.vrol.mask.nxv64i8.nxv64i8(
+    <vscale x 64 x i8> %0,
+    <vscale x 64 x i8> %1,
+    <vscale x 64 x i8> %2,
+    <vscale x 64 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vrol.nxv1i16.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  iXLen)
+
+define <vscale x 1 x i16> @intrinsic_vrol_vv_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vrol_vv_nxv1i16_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    vrol.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.vrol.nxv1i16.nxv1i16(
+    <vscale x 1 x i16> undef,
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    iXLen %2)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vrol.mask.nxv1i16.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 1 x i16> @intrinsic_vrol_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv1i16_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT:    vrol.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.vrol.mask.nxv1i16.nxv1i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    <vscale x 1 x i16> %2,
+    <vscale x 1 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vrol.nxv2i16.nxv2i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  iXLen)
+
+define <vscale x 2 x i16> @intrinsic_vrol_vv_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vrol_vv_nxv2i16_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    vrol.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i16> @llvm.riscv.vrol.nxv2i16.nxv2i16(
+    <vscale x 2 x i16> undef,
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %1,
+    iXLen %2)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vrol.mask.nxv2i16.nxv2i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  <vscale x 2 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 2 x i16> @intrinsic_vrol_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv2i16_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT:    vrol.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i16> @llvm.riscv.vrol.mask.nxv2i16.nxv2i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %1,
+    <vscale x 2 x i16> %2,
+    <vscale x 2 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vrol.nxv4i16.nxv4i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  iXLen)
+
+define <vscale x 4 x i16> @intrinsic_vrol_vv_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vrol_vv_nxv4i16_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    vrol.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i16> @llvm.riscv.vrol.nxv4i16.nxv4i16(
+    <vscale x 4 x i16> undef,
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %1,
+    iXLen %2)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vrol.mask.nxv4i16.nxv4i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  <vscale x 4 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 4 x i16> @intrinsic_vrol_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv4i16_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT:    vrol.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i16> @llvm.riscv.vrol.mask.nxv4i16.nxv4i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %1,
+    <vscale x 4 x i16> %2,
+    <vscale x 4 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vrol.nxv8i16.nxv8i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  iXLen)
+
+define <vscale x 8 x i16> @intrinsic_vrol_vv_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vrol_vv_nxv8i16_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    vrol.vv v8, v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i16> @llvm.riscv.vrol.nxv8i16.nxv8i16(
+    <vscale x 8 x i16> undef,
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %1,
+    iXLen %2)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vrol.mask.nxv8i16.nxv8i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  <vscale x 8 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 8 x i16> @intrinsic_vrol_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv8i16_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT:    vrol.vv v8, v10, v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i16> @llvm.riscv.vrol.mask.nxv8i16.nxv8i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %1,
+    <vscale x 8 x i16> %2,
+    <vscale x 8 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vrol.nxv16i16.nxv16i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  iXLen)
+
+define <vscale x 16 x i16> @intrinsic_vrol_vv_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vrol_vv_nxv16i16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    vrol.vv v8, v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i16> @llvm.riscv.vrol.nxv16i16.nxv16i16(
+    <vscale x 16 x i16> undef,
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %1,
+    iXLen %2)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vrol.mask.nxv16i16.nxv16i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  <vscale x 16 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 16 x i16> @intrinsic_vrol_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv16i16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT:    vrol.vv v8, v12, v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i16> @llvm.riscv.vrol.mask.nxv16i16.nxv16i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %1,
+    <vscale x 16 x i16> %2,
+    <vscale x 16 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vrol.nxv32i16.nxv32i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  iXLen)
+
+define <vscale x 32 x i16> @intrinsic_vrol_vv_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vrol_vv_nxv32i16_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT:    vrol.vv v8, v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i16> @llvm.riscv.vrol.nxv32i16.nxv32i16(
+    <vscale x 32 x i16> undef,
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16> %1,
+    iXLen %2)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vrol.mask.nxv32i16.nxv32i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  <vscale x 32 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 32 x i16> @intrinsic_vrol_mask_vv_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv32i16_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vl8re16.v v24, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
+; CHECK-NEXT:    vrol.vv v8, v16, v24, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i16> @llvm.riscv.vrol.mask.nxv32i16.nxv32i16(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16> %1,
+    <vscale x 32 x i16> %2,
+    <vscale x 32 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vrol.nxv1i32.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  iXLen)
+
+define <vscale x 1 x i32> @intrinsic_vrol_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vrol_vv_nxv1i32_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    vrol.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vrol.nxv1i32.nxv1i32(
+    <vscale x 1 x i32> undef,
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    iXLen %2)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vrol.mask.nxv1i32.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 1 x i32> @intrinsic_vrol_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv1i32_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT:    vrol.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vrol.mask.nxv1i32.nxv1i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    <vscale x 1 x i32> %2,
+    <vscale x 1 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vrol.nxv2i32.nxv2i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  iXLen)
+
+define <vscale x 2 x i32> @intrinsic_vrol_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vrol_vv_nxv2i32_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    vrol.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vrol.nxv2i32.nxv2i32(
+    <vscale x 2 x i32> undef,
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    iXLen %2)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vrol.mask.nxv2i32.nxv2i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  <vscale x 2 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 2 x i32> @intrinsic_vrol_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv2i32_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT:    vrol.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vrol.mask.nxv2i32.nxv2i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    <vscale x 2 x i32> %2,
+    <vscale x 2 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vrol.nxv4i32.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  iXLen)
+
+define <vscale x 4 x i32> @intrinsic_vrol_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vrol_vv_nxv4i32_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    vrol.vv v8, v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vrol.nxv4i32.nxv4i32(
+    <vscale x 4 x i32> undef,
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    iXLen %2)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vrol.mask.nxv4i32.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  <vscale x 4 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 4 x i32> @intrinsic_vrol_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv4i32_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT:    vrol.vv v8, v10, v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vrol.mask.nxv4i32.nxv4i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    <vscale x 4 x i32> %2,
+    <vscale x 4 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vrol.nxv8i32.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  iXLen)
+
+define <vscale x 8 x i32> @intrinsic_vrol_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vrol_vv_nxv8i32_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    vrol.vv v8, v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vrol.nxv8i32.nxv8i32(
+    <vscale x 8 x i32> undef,
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    iXLen %2)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vrol.mask.nxv8i32.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  <vscale x 8 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 8 x i32> @intrinsic_vrol_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv8i32_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT:    vrol.vv v8, v12, v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vrol.mask.nxv8i32.nxv8i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    <vscale x 8 x i32> %2,
+    <vscale x 8 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vrol.nxv16i32.nxv16i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  iXLen)
+
+define <vscale x 16 x i32> @intrinsic_vrol_vv_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vrol_vv_nxv16i32_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT:    vrol.vv v8, v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vrol.nxv16i32.nxv16i32(
+    <vscale x 16 x i32> undef,
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    iXLen %2)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vrol.mask.nxv16i32.nxv16i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  <vscale x 16 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 16 x i32> @intrinsic_vrol_mask_vv_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv16i32_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vl8re32.v v24, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
+; CHECK-NEXT:    vrol.vv v8, v16, v24, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vrol.mask.nxv16i32.nxv16i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    <vscale x 16 x i32> %2,
+    <vscale x 16 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vrol.nxv1i64.nxv1i64(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>,
+  iXLen)
+
+define <vscale x 1 x i64> @intrinsic_vrol_vv_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vrol_vv_nxv1i64_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    vrol.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vrol.nxv1i64.nxv1i64(
+    <vscale x 1 x i64> undef,
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64> %1,
+    iXLen %2)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vrol.mask.nxv1i64.nxv1i64(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 1 x i64> @intrinsic_vrol_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv1i64_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT:    vrol.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vrol.mask.nxv1i64.nxv1i64(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64> %1,
+    <vscale x 1 x i64> %2,
+    <vscale x 1 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vrol.nxv2i64.nxv2i64(
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>,
+  iXLen)
+
+define <vscale x 2 x i64> @intrinsic_vrol_vv_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vrol_vv_nxv2i64_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    vrol.vv v8, v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i64> @llvm.riscv.vrol.nxv2i64.nxv2i64(
+    <vscale x 2 x i64> undef,
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64> %1,
+    iXLen %2)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vrol.mask.nxv2i64.nxv2i64(
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>,
+  <vscale x 2 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 2 x i64> @intrinsic_vrol_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv2i64_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT:    vrol.vv v8, v10, v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i64> @llvm.riscv.vrol.mask.nxv2i64.nxv2i64(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64> %1,
+    <vscale x 2 x i64> %2,
+    <vscale x 2 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vrol.nxv4i64.nxv4i64(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  iXLen)
+
+define <vscale x 4 x i64> @intrinsic_vrol_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vrol_vv_nxv4i64_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT:    vrol.vv v8, v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i64> @llvm.riscv.vrol.nxv4i64.nxv4i64(
+    <vscale x 4 x i64> undef,
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64> %1,
+    iXLen %2)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vrol.mask.nxv4i64.nxv4i64(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  <vscale x 4 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 4 x i64> @intrinsic_vrol_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv4i64_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT:    vrol.vv v8, v12, v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i64> @llvm.riscv.vrol.mask.nxv4i64.nxv4i64(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64> %1,
+    <vscale x 4 x i64> %2,
+    <vscale x 4 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vrol.nxv8i64.nxv8i64(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>,
+  iXLen)
+
+define <vscale x 8 x i64> @intrinsic_vrol_vv_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vrol_vv_nxv8i64_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT:    vrol.vv v8, v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vrol.nxv8i64.nxv8i64(
+    <vscale x 8 x i64> undef,
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i64> %1,
+    iXLen %2)
+
+  ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vrol.mask.nxv8i64.nxv8i64(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>,
+  <vscale x 8 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 8 x i64> @intrinsic_vrol_mask_vv_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv8i64_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vl8re64.v v24, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT:    vrol.vv v8, v16, v24, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vrol.mask.nxv8i64.nxv8i64(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i64> %1,
+    <vscale x 8 x i64> %2,
+    <vscale x 8 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vrol.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  iXLen,
+  iXLen)
+
+define <vscale x 1 x i8> @intrinsic_vrol_vx_nxv1i8(<vscale x 1 x i8> %0, iXLen %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vrol_vx_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT:    vrol.vx v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vrol.nxv1i8(
+    <vscale x 1 x i8> undef,
+    <vscale x 1 x i8> %0,
+    iXLen %1,
+    iXLen %2)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vrol.mask.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  iXLen,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 1 x i8> @intrinsic_vrol_mask_vx_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vrol_mask_vx_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT:    vrol.vx v8, v9, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vrol.mask.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    iXLen %2,
+    <vscale x 1 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vrol.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  iXLen,
+  iXLen)
+
+define <vscale x 2 x i8> @intrinsic_vrol_vx_nxv2i8(<vscale x 2 x i8> %0, iXLen %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vrol_vx_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT:    vrol.vx v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.vrol.nxv2i8(
+    <vscale x 2 x i8> undef,
+    <vscale x 2 x i8> %0,
+    iXLen %1,
+    iXLen %2)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vrol.mask.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  iXLen,
+  <vscale x 2 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 2 x i8> @intrinsic_vrol_mask_vx_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vrol_mask_vx_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT:    vrol.vx v8, v9, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.vrol.mask.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    iXLen %2,
+    <vscale x 2 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vrol.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  iXLen,
+  iXLen)
+
+define <vscale x 4 x i8> @intrinsic_vrol_vx_nxv4i8(<vscale x 4 x i8> %0, iXLen %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vrol_vx_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT:    vrol.vx v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.vrol.nxv4i8(
+    <vscale x 4 x i8> undef,
+    <vscale x 4 x i8> %0,
+    iXLen %1,
+    iXLen %2)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vrol.mask.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  iXLen,
+  <vscale x 4 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 4 x i8> @intrinsic_vrol_mask_vx_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vrol_mask_vx_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT:    vrol.vx v8, v9, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.vrol.mask.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    iXLen %2,
+    <vscale x 4 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vrol.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  iXLen,
+  iXLen)
+
+define <vscale x 8 x i8> @intrinsic_vrol_vx_nxv8i8(<vscale x 8 x i8> %0, iXLen %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vrol_vx_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT:    vrol.vx v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.vrol.nxv8i8(
+    <vscale x 8 x i8> undef,
+    <vscale x 8 x i8> %0,
+    iXLen %1,
+    iXLen %2)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vrol.mask.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  iXLen,
+  <vscale x 8 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 8 x i8> @intrinsic_vrol_mask_vx_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vrol_mask_vx_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT:    vrol.vx v8, v9, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.vrol.mask.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    iXLen %2,
+    <vscale x 8 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vrol.nxv16i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  iXLen,
+  iXLen)
+
+define <vscale x 16 x i8> @intrinsic_vrol_vx_nxv16i8(<vscale x 16 x i8> %0, iXLen %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vrol_vx_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT:    vrol.vx v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.vrol.nxv16i8(
+    <vscale x 16 x i8> undef,
+    <vscale x 16 x i8> %0,
+    iXLen %1,
+    iXLen %2)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vrol.mask.nxv16i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  iXLen,
+  <vscale x 16 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 16 x i8> @intrinsic_vrol_mask_vx_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vrol_mask_vx_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
+; CHECK-NEXT:    vrol.vx v8, v10, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.vrol.mask.nxv16i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    iXLen %2,
+    <vscale x 16 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vrol.nxv32i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  iXLen,
+  iXLen)
+
+define <vscale x 32 x i8> @intrinsic_vrol_vx_nxv32i8(<vscale x 32 x i8> %0, iXLen %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vrol_vx_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
+; CHECK-NEXT:    vrol.vx v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i8> @llvm.riscv.vrol.nxv32i8(
+    <vscale x 32 x i8> undef,
+    <vscale x 32 x i8> %0,
+    iXLen %1,
+    iXLen %2)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vrol.mask.nxv32i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  iXLen,
+  <vscale x 32 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 32 x i8> @intrinsic_vrol_mask_vx_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vrol_mask_vx_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
+; CHECK-NEXT:    vrol.vx v8, v12, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i8> @llvm.riscv.vrol.mask.nxv32i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8> %1,
+    iXLen %2,
+    <vscale x 32 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vrol.nxv64i8(
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  iXLen,
+  iXLen)
+
+define <vscale x 64 x i8> @intrinsic_vrol_vx_nxv64i8(<vscale x 64 x i8> %0, iXLen %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vrol_vx_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT:    vrol.vx v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 64 x i8> @llvm.riscv.vrol.nxv64i8(
+    <vscale x 64 x i8> undef,
+    <vscale x 64 x i8> %0,
+    iXLen %1,
+    iXLen %2)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vrol.mask.nxv64i8(
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  iXLen,
+  <vscale x 64 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 64 x i8> @intrinsic_vrol_mask_vx_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, iXLen %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vrol_mask_vx_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
+; CHECK-NEXT:    vrol.vx v8, v16, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 64 x i8> @llvm.riscv.vrol.mask.nxv64i8(
+    <vscale x 64 x i8> %0,
+    <vscale x 64 x i8> %1,
+    iXLen %2,
+    <vscale x 64 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vrol.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  iXLen,
+  iXLen)
+
+define <vscale x 1 x i16> @intrinsic_vrol_vx_nxv1i16(<vscale x 1 x i16> %0, iXLen %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vrol_vx_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT:    vrol.vx v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.vrol.nxv1i16(
+    <vscale x 1 x i16> undef,
+    <vscale x 1 x i16> %0,
+    iXLen %1,
+    iXLen %2)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vrol.mask.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  iXLen,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 1 x i16> @intrinsic_vrol_mask_vx_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vrol_mask_vx_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT:    vrol.vx v8, v9, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.vrol.mask.nxv1i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    iXLen %2,
+    <vscale x 1 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vrol.nxv2i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  iXLen,
+  iXLen)
+
+define <vscale x 2 x i16> @intrinsic_vrol_vx_nxv2i16(<vscale x 2 x i16> %0, iXLen %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vrol_vx_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT:    vrol.vx v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i16> @llvm.riscv.vrol.nxv2i16(
+    <vscale x 2 x i16> undef,
+    <vscale x 2 x i16> %0,
+    iXLen %1,
+    iXLen %2)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vrol.mask.nxv2i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  iXLen,
+  <vscale x 2 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 2 x i16> @intrinsic_vrol_mask_vx_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vrol_mask_vx_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
+; CHECK-NEXT:    vrol.vx v8, v9, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i16> @llvm.riscv.vrol.mask.nxv2i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %1,
+    iXLen %2,
+    <vscale x 2 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vrol.nxv4i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  iXLen,
+  iXLen)
+
+define <vscale x 4 x i16> @intrinsic_vrol_vx_nxv4i16(<vscale x 4 x i16> %0, iXLen %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vrol_vx_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT:    vrol.vx v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i16> @llvm.riscv.vrol.nxv4i16(
+    <vscale x 4 x i16> undef,
+    <vscale x 4 x i16> %0,
+    iXLen %1,
+    iXLen %2)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vrol.mask.nxv4i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  iXLen,
+  <vscale x 4 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 4 x i16> @intrinsic_vrol_mask_vx_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vrol_mask_vx_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT:    vrol.vx v8, v9, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i16> @llvm.riscv.vrol.mask.nxv4i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %1,
+    iXLen %2,
+    <vscale x 4 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vrol.nxv8i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  iXLen,
+  iXLen)
+
+define <vscale x 8 x i16> @intrinsic_vrol_vx_nxv8i16(<vscale x 8 x i16> %0, iXLen %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vrol_vx_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT:    vrol.vx v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i16> @llvm.riscv.vrol.nxv8i16(
+    <vscale x 8 x i16> undef,
+    <vscale x 8 x i16> %0,
+    iXLen %1,
+    iXLen %2)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vrol.mask.nxv8i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  iXLen,
+  <vscale x 8 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 8 x i16> @intrinsic_vrol_mask_vx_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vrol_mask_vx_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
+; CHECK-NEXT:    vrol.vx v8, v10, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i16> @llvm.riscv.vrol.mask.nxv8i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %1,
+    iXLen %2,
+    <vscale x 8 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vrol.nxv16i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  iXLen,
+  iXLen)
+
+define <vscale x 16 x i16> @intrinsic_vrol_vx_nxv16i16(<vscale x 16 x i16> %0, iXLen %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vrol_vx_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT:    vrol.vx v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i16> @llvm.riscv.vrol.nxv16i16(
+    <vscale x 16 x i16> undef,
+    <vscale x 16 x i16> %0,
+    iXLen %1,
+    iXLen %2)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vrol.mask.nxv16i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  iXLen,
+  <vscale x 16 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 16 x i16> @intrinsic_vrol_mask_vx_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vrol_mask_vx_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
+; CHECK-NEXT:    vrol.vx v8, v12, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i16> @llvm.riscv.vrol.mask.nxv16i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %1,
+    iXLen %2,
+    <vscale x 16 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vrol.nxv32i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  iXLen,
+  iXLen)
+
+define <vscale x 32 x i16> @intrinsic_vrol_vx_nxv32i16(<vscale x 32 x i16> %0, iXLen %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vrol_vx_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT:    vrol.vx v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i16> @llvm.riscv.vrol.nxv32i16(
+    <vscale x 32 x i16> undef,
+    <vscale x 32 x i16> %0,
+    iXLen %1,
+    iXLen %2)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vrol.mask.nxv32i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  iXLen,
+  <vscale x 32 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 32 x i16> @intrinsic_vrol_mask_vx_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, iXLen %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vrol_mask_vx_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
+; CHECK-NEXT:    vrol.vx v8, v16, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i16> @llvm.riscv.vrol.mask.nxv32i16(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16> %1,
+    iXLen %2,
+    <vscale x 32 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vrol.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  iXLen,
+  iXLen)
+
+define <vscale x 1 x i32> @intrinsic_vrol_vx_nxv1i32(<vscale x 1 x i32> %0, iXLen %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vrol_vx_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT:    vrol.vx v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vrol.nxv1i32(
+    <vscale x 1 x i32> undef,
+    <vscale x 1 x i32> %0,
+    iXLen %1,
+    iXLen %2)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vrol.mask.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  iXLen,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 1 x i32> @intrinsic_vrol_mask_vx_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vrol_mask_vx_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
+; CHECK-NEXT:    vrol.vx v8, v9, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vrol.mask.nxv1i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    iXLen %2,
+    <vscale x 1 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vrol.nxv2i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  iXLen,
+  iXLen)
+
+define <vscale x 2 x i32> @intrinsic_vrol_vx_nxv2i32(<vscale x 2 x i32> %0, iXLen %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vrol_vx_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT:    vrol.vx v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vrol.nxv2i32(
+    <vscale x 2 x i32> undef,
+    <vscale x 2 x i32> %0,
+    iXLen %1,
+    iXLen %2)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vrol.mask.nxv2i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  iXLen,
+  <vscale x 2 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 2 x i32> @intrinsic_vrol_mask_vx_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vrol_mask_vx_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT:    vrol.vx v8, v9, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vrol.mask.nxv2i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    iXLen %2,
+    <vscale x 2 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vrol.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  iXLen,
+  iXLen)
+
+define <vscale x 4 x i32> @intrinsic_vrol_vx_nxv4i32(<vscale x 4 x i32> %0, iXLen %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vrol_vx_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT:    vrol.vx v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vrol.nxv4i32(
+    <vscale x 4 x i32> undef,
+    <vscale x 4 x i32> %0,
+    iXLen %1,
+    iXLen %2)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vrol.mask.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  iXLen,
+  <vscale x 4 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 4 x i32> @intrinsic_vrol_mask_vx_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vrol_mask_vx_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
+; CHECK-NEXT:    vrol.vx v8, v10, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vrol.mask.nxv4i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    iXLen %2,
+    <vscale x 4 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vrol.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  iXLen,
+  iXLen)
+
+define <vscale x 8 x i32> @intrinsic_vrol_vx_nxv8i32(<vscale x 8 x i32> %0, iXLen %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vrol_vx_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT:    vrol.vx v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vrol.nxv8i32(
+    <vscale x 8 x i32> undef,
+    <vscale x 8 x i32> %0,
+    iXLen %1,
+    iXLen %2)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vrol.mask.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  iXLen,
+  <vscale x 8 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 8 x i32> @intrinsic_vrol_mask_vx_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vrol_mask_vx_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
+; CHECK-NEXT:    vrol.vx v8, v12, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vrol.mask.nxv8i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    iXLen %2,
+    <vscale x 8 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vrol.nxv16i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  iXLen,
+  iXLen)
+
+define <vscale x 16 x i32> @intrinsic_vrol_vx_nxv16i32(<vscale x 16 x i32> %0, iXLen %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vrol_vx_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT:    vrol.vx v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vrol.nxv16i32(
+    <vscale x 16 x i32> undef,
+    <vscale x 16 x i32> %0,
+    iXLen %1,
+    iXLen %2)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vrol.mask.nxv16i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  iXLen,
+  <vscale x 16 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 16 x i32> @intrinsic_vrol_mask_vx_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vrol_mask_vx_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
+; CHECK-NEXT:    vrol.vx v8, v16, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vrol.mask.nxv16i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    iXLen %2,
+    <vscale x 16 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vrol.nxv1i64(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>,
+  iXLen,
+  iXLen)
+
+define <vscale x 1 x i64> @intrinsic_vrol_vx_nxv1i64(<vscale x 1 x i64> %0, iXLen %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vrol_vx_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT:    vrol.vx v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vrol.nxv1i64(
+    <vscale x 1 x i64> undef,
+    <vscale x 1 x i64> %0,
+    iXLen %1,
+    iXLen %2)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vrol.mask.nxv1i64(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>,
+  iXLen,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 1 x i64> @intrinsic_vrol_mask_vx_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vrol_mask_vx_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
+; CHECK-NEXT:    vrol.vx v8, v9, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vrol.mask.nxv1i64(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64> %1,
+    iXLen %2,
+    <vscale x 1 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vrol.nxv2i64(
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>,
+  iXLen,
+  iXLen)
+
+define <vscale x 2 x i64> @intrinsic_vrol_vx_nxv2i64(<vscale x 2 x i64> %0, iXLen %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vrol_vx_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT:    vrol.vx v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i64> @llvm.riscv.vrol.nxv2i64(
+    <vscale x 2 x i64> undef,
+    <vscale x 2 x i64> %0,
+    iXLen %1,
+    iXLen %2)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vrol.mask.nxv2i64(
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>,
+  iXLen,
+  <vscale x 2 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 2 x i64> @intrinsic_vrol_mask_vx_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vrol_mask_vx_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
+; CHECK-NEXT:    vrol.vx v8, v10, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i64> @llvm.riscv.vrol.mask.nxv2i64(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64> %1,
+    iXLen %2,
+    <vscale x 2 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vrol.nxv4i64(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  iXLen,
+  iXLen)
+
+define <vscale x 4 x i64> @intrinsic_vrol_vx_nxv4i64(<vscale x 4 x i64> %0, iXLen %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vrol_vx_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT:    vrol.vx v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i64> @llvm.riscv.vrol.nxv4i64(
+    <vscale x 4 x i64> undef,
+    <vscale x 4 x i64> %0,
+    iXLen %1,
+    iXLen %2)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vrol.mask.nxv4i64(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  iXLen,
+  <vscale x 4 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 4 x i64> @intrinsic_vrol_mask_vx_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vrol_mask_vx_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
+; CHECK-NEXT:    vrol.vx v8, v12, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i64> @llvm.riscv.vrol.mask.nxv4i64(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64> %1,
+    iXLen %2,
+    <vscale x 4 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vrol.nxv8i64(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>,
+  iXLen,
+  iXLen)
+
+define <vscale x 8 x i64> @intrinsic_vrol_vx_nxv8i64(<vscale x 8 x i64> %0, iXLen %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vrol_vx_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT:    vrol.vx v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vrol.nxv8i64(
+    <vscale x 8 x i64> undef,
+    <vscale x 8 x i64> %0,
+    iXLen %1,
+    iXLen %2)
+
+  ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vrol.mask.nxv8i64(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>,
+  iXLen,
+  <vscale x 8 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 8 x i64> @intrinsic_vrol_mask_vx_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vrol_mask_vx_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT:    vrol.vx v8, v16, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vrol.mask.nxv8i64(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i64> %1,
+    iXLen %2,
+    <vscale x 8 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 8 x i64> %a
+}
+
+define <vscale x 1 x i8> @intrinsic_vrol_vi_nxv1i8(<vscale x 1 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vrol_vi_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT:    vror.vi v8, v8, 62
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vrol.nxv1i8(
+    <vscale x 1 x i8> undef,
+    <vscale x 1 x i8> %0,
+    iXLen 2,
+    iXLen %1)
+
+  ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 1 x i8> @intrinsic_vrol_mask_vi_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vrol_mask_vi_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT:    vror.vi v8, v9, 62, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vrol.mask.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    iXLen 2,
+    <vscale x 1 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_vrol_vi_nxv2i8(<vscale x 2 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vrol_vi_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT:    vror.vi v8, v8, 62
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.vrol.nxv2i8(
+    <vscale x 2 x i8> undef,
+    <vscale x 2 x i8> %0,
+    iXLen 2,
+    iXLen %1)
+
+  ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_vrol_mask_vi_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vrol_mask_vi_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT:    vror.vi v8, v9, 62, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.vrol.mask.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    iXLen 2,
+    <vscale x 2 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_vrol_vi_nxv4i8(<vscale x 4 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vrol_vi_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT:    vror.vi v8, v8, 62
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.vrol.nxv4i8(
+    <vscale x 4 x i8> undef,
+    <vscale x 4 x i8> %0,
+    iXLen 2,
+    iXLen %1)
+
+  ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_vrol_mask_vi_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vrol_mask_vi_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT:    vror.vi v8, v9, 62, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.vrol.mask.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    iXLen 2,
+    <vscale x 4 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vrol_vi_nxv8i8(<vscale x 8 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vrol_vi_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT:    vror.vi v8, v8, 62
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.vrol.nxv8i8(
+    <vscale x 8 x i8> undef,
+    <vscale x 8 x i8> %0,
+    iXLen 2,
+    iXLen %1)
+
+  ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vrol_mask_vi_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vrol_mask_vi_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT:    vror.vi v8, v9, 62, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.vrol.mask.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    iXLen 2,
+    <vscale x 8 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_vrol_vi_nxv16i8(<vscale x 16 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vrol_vi_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT:    vror.vi v8, v8, 62
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.vrol.nxv16i8(
+    <vscale x 16 x i8> undef,
+    <vscale x 16 x i8> %0,
+    iXLen 2,
+    iXLen %1)
+
+  ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_vrol_mask_vi_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vrol_mask_vi_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT:    vror.vi v8, v10, 62, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.vrol.mask.nxv16i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    iXLen 2,
+    <vscale x 16 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 32 x i8> @intrinsic_vrol_vi_nxv32i8(<vscale x 32 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vrol_vi_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT:    vror.vi v8, v8, 62
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i8> @llvm.riscv.vrol.nxv32i8(
+    <vscale x 32 x i8> undef,
+    <vscale x 32 x i8> %0,
+    iXLen 2,
+    iXLen %1)
+
+  ret <vscale x 32 x i8> %a
+}
+
+define <vscale x 32 x i8> @intrinsic_vrol_mask_vi_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vrol_mask_vi_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT:    vror.vi v8, v12, 62, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i8> @llvm.riscv.vrol.mask.nxv32i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8> %1,
+    iXLen 2,
+    <vscale x 32 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 32 x i8> %a
+}
+
+define <vscale x 64 x i8> @intrinsic_vrol_vi_nxv64i8(<vscale x 64 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vrol_vi_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT:    vror.vi v8, v8, 62
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 64 x i8> @llvm.riscv.vrol.nxv64i8(
+    <vscale x 64 x i8> undef,
+    <vscale x 64 x i8> %0,
+    iXLen 2,
+    iXLen %1)
+
+  ret <vscale x 64 x i8> %a
+}
+
+define <vscale x 64 x i8> @intrinsic_vrol_mask_vi_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vrol_mask_vi_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
+; CHECK-NEXT:    vror.vi v8, v16, 62, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 64 x i8> @llvm.riscv.vrol.mask.nxv64i8(
+    <vscale x 64 x i8> %0,
+    <vscale x 64 x i8> %1,
+    iXLen 2,
+    <vscale x 64 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 64 x i8> %a
+}
+
+define <vscale x 1 x i16> @intrinsic_vrol_vi_nxv1i16(<vscale x 1 x i16> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vrol_vi_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    vror.vi v8, v8, 62
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.vrol.nxv1i16(
+    <vscale x 1 x i16> undef,
+    <vscale x 1 x i16> %0,
+    iXLen 2,
+    iXLen %1)
+
+  ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 1 x i16> @intrinsic_vrol_mask_vi_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vrol_mask_vi_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT:    vror.vi v8, v9, 62, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.vrol.mask.nxv1i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    iXLen 2,
+    <vscale x 1 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 2 x i16> @intrinsic_vrol_vi_nxv2i16(<vscale x 2 x i16> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vrol_vi_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    vror.vi v8, v8, 62
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i16> @llvm.riscv.vrol.nxv2i16(
+    <vscale x 2 x i16> undef,
+    <vscale x 2 x i16> %0,
+    iXLen 2,
+    iXLen %1)
+
+  ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 2 x i16> @intrinsic_vrol_mask_vi_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vrol_mask_vi_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT:    vror.vi v8, v9, 62, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i16> @llvm.riscv.vrol.mask.nxv2i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %1,
+    iXLen 2,
+    <vscale x 2 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 4 x i16> @intrinsic_vrol_vi_nxv4i16(<vscale x 4 x i16> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vrol_vi_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    vror.vi v8, v8, 62
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i16> @llvm.riscv.vrol.nxv4i16(
+    <vscale x 4 x i16> undef,
+    <vscale x 4 x i16> %0,
+    iXLen 2,
+    iXLen %1)
+
+  ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 4 x i16> @intrinsic_vrol_mask_vi_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vrol_mask_vi_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT:    vror.vi v8, v9, 62, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i16> @llvm.riscv.vrol.mask.nxv4i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %1,
+    iXLen 2,
+    <vscale x 4 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 8 x i16> @intrinsic_vrol_vi_nxv8i16(<vscale x 8 x i16> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vrol_vi_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    vror.vi v8, v8, 62
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i16> @llvm.riscv.vrol.nxv8i16(
+    <vscale x 8 x i16> undef,
+    <vscale x 8 x i16> %0,
+    iXLen 2,
+    iXLen %1)
+
+  ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 8 x i16> @intrinsic_vrol_mask_vi_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vrol_mask_vi_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT:    vror.vi v8, v10, 62, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i16> @llvm.riscv.vrol.mask.nxv8i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %1,
+    iXLen 2,
+    <vscale x 8 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 16 x i16> @intrinsic_vrol_vi_nxv16i16(<vscale x 16 x i16> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vrol_vi_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    vror.vi v8, v8, 62
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i16> @llvm.riscv.vrol.nxv16i16(
+    <vscale x 16 x i16> undef,
+    <vscale x 16 x i16> %0,
+    iXLen 2,
+    iXLen %1)
+
+  ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 16 x i16> @intrinsic_vrol_mask_vi_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vrol_mask_vi_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT:    vror.vi v8, v12, 62, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i16> @llvm.riscv.vrol.mask.nxv16i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %1,
+    iXLen 2,
+    <vscale x 16 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 32 x i16> @intrinsic_vrol_vi_nxv32i16(<vscale x 32 x i16> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vrol_vi_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT:    vror.vi v8, v8, 62
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i16> @llvm.riscv.vrol.nxv32i16(
+    <vscale x 32 x i16> undef,
+    <vscale x 32 x i16> %0,
+    iXLen 2,
+    iXLen %1)
+
+  ret <vscale x 32 x i16> %a
+}
+
+define <vscale x 32 x i16> @intrinsic_vrol_mask_vi_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vrol_mask_vi_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT:    vror.vi v8, v16, 62, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i16> @llvm.riscv.vrol.mask.nxv32i16(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16> %1,
+    iXLen 2,
+    <vscale x 32 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 32 x i16> %a
+}
+
+define <vscale x 1 x i32> @intrinsic_vrol_vi_nxv1i32(<vscale x 1 x i32> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vrol_vi_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    vror.vi v8, v8, 62
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vrol.nxv1i32(
+    <vscale x 1 x i32> undef,
+    <vscale x 1 x i32> %0,
+    iXLen 2,
+    iXLen %1)
+
+  ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 1 x i32> @intrinsic_vrol_mask_vi_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vrol_mask_vi_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT:    vror.vi v8, v9, 62, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vrol.mask.nxv1i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    iXLen 2,
+    <vscale x 1 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 2 x i32> @intrinsic_vrol_vi_nxv2i32(<vscale x 2 x i32> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vrol_vi_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    vror.vi v8, v8, 62
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vrol.nxv2i32(
+    <vscale x 2 x i32> undef,
+    <vscale x 2 x i32> %0,
+    iXLen 2,
+    iXLen %1)
+
+  ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 2 x i32> @intrinsic_vrol_mask_vi_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vrol_mask_vi_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT:    vror.vi v8, v9, 62, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vrol.mask.nxv2i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    iXLen 2,
+    <vscale x 2 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 4 x i32> @intrinsic_vrol_vi_nxv4i32(<vscale x 4 x i32> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vrol_vi_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    vror.vi v8, v8, 62
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vrol.nxv4i32(
+    <vscale x 4 x i32> undef,
+    <vscale x 4 x i32> %0,
+    iXLen 2,
+    iXLen %1)
+
+  ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 4 x i32> @intrinsic_vrol_mask_vi_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vrol_mask_vi_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT:    vror.vi v8, v10, 62, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vrol.mask.nxv4i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    iXLen 2,
+    <vscale x 4 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 8 x i32> @intrinsic_vrol_vi_nxv8i32(<vscale x 8 x i32> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vrol_vi_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    vror.vi v8, v8, 62
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vrol.nxv8i32(
+    <vscale x 8 x i32> undef,
+    <vscale x 8 x i32> %0,
+    iXLen 2,
+    iXLen %1)
+
+  ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 8 x i32> @intrinsic_vrol_mask_vi_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vrol_mask_vi_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT:    vror.vi v8, v12, 62, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vrol.mask.nxv8i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    iXLen 2,
+    <vscale x 8 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 16 x i32> @intrinsic_vrol_vi_nxv16i32(<vscale x 16 x i32> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vrol_vi_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT:    vror.vi v8, v8, 62
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vrol.nxv16i32(
+    <vscale x 16 x i32> undef,
+    <vscale x 16 x i32> %0,
+    iXLen 2,
+    iXLen %1)
+
+  ret <vscale x 16 x i32> %a
+}
+
+define <vscale x 16 x i32> @intrinsic_vrol_mask_vi_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vrol_mask_vi_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT:    vror.vi v8, v16, 62, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vrol.mask.nxv16i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    iXLen 2,
+    <vscale x 16 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 16 x i32> %a
+}
+
+define <vscale x 1 x i64> @intrinsic_vrol_vi_nxv1i64(<vscale x 1 x i64> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vrol_vi_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    vror.vi v8, v8, 62
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vrol.nxv1i64(
+    <vscale x 1 x i64> undef,
+    <vscale x 1 x i64> %0,
+    iXLen 2,
+    iXLen %1)
+
+  ret <vscale x 1 x i64> %a
+}
+
+define <vscale x 1 x i64> @intrinsic_vrol_mask_vi_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vrol_mask_vi_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT:    vror.vi v8, v9, 62, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vrol.mask.nxv1i64(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64> %1,
+    iXLen 2,
+    <vscale x 1 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 1 x i64> %a
+}
+
+define <vscale x 2 x i64> @intrinsic_vrol_vi_nxv2i64(<vscale x 2 x i64> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vrol_vi_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    vror.vi v8, v8, 62
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i64> @llvm.riscv.vrol.nxv2i64(
+    <vscale x 2 x i64> undef,
+    <vscale x 2 x i64> %0,
+    iXLen 2,
+    iXLen %1)
+
+  ret <vscale x 2 x i64> %a
+}
+
+define <vscale x 2 x i64> @intrinsic_vrol_mask_vi_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vrol_mask_vi_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT:    vror.vi v8, v10, 62, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i64> @llvm.riscv.vrol.mask.nxv2i64(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64> %1,
+    iXLen 2,
+    <vscale x 2 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 2 x i64> %a
+}
+
+define <vscale x 4 x i64> @intrinsic_vrol_vi_nxv4i64(<vscale x 4 x i64> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vrol_vi_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT:    vror.vi v8, v8, 62
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i64> @llvm.riscv.vrol.nxv4i64(
+    <vscale x 4 x i64> undef,
+    <vscale x 4 x i64> %0,
+    iXLen 2,
+    iXLen %1)
+
+  ret <vscale x 4 x i64> %a
+}
+
+define <vscale x 4 x i64> @intrinsic_vrol_mask_vi_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vrol_mask_vi_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT:    vror.vi v8, v12, 62, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i64> @llvm.riscv.vrol.mask.nxv4i64(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64> %1,
+    iXLen 2,
+    <vscale x 4 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 4 x i64> %a
+}
+
+define <vscale x 8 x i64> @intrinsic_vrol_vi_nxv8i64(<vscale x 8 x i64> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vrol_vi_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT:    vror.vi v8, v8, 62
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vrol.nxv8i64(
+    <vscale x 8 x i64> undef,
+    <vscale x 8 x i64> %0,
+    iXLen 2,
+    iXLen %1)
+
+  ret <vscale x 8 x i64> %a
+}
+
+define <vscale x 8 x i64> @intrinsic_vrol_mask_vi_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vrol_mask_vi_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT:    vror.vi v8, v16, 62, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vrol.mask.nxv8i64(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i64> %1,
+    iXLen 2,
+    <vscale x 8 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 8 x i64> %a
+}

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vror.ll b/llvm/test/CodeGen/RISCV/rvv/vror.ll
new file mode 100644
index 00000000000000..701690392d13a9
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vror.ll
@@ -0,0 +1,2803 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvbb \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvbb \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+
+declare <vscale x 1 x i8> @llvm.riscv.vror.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  iXLen)
+
+define <vscale x 1 x i8> @intrinsic_vror_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vror_vv_nxv1i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT:    vror.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vror.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    iXLen %2)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vror.mask.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 1 x i8> @intrinsic_vror_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vror_mask_vv_nxv1i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT:    vror.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vror.mask.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    <vscale x 1 x i8> %2,
+    <vscale x 1 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vror.nxv2i8.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  iXLen)
+
+define <vscale x 2 x i8> @intrinsic_vror_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vror_vv_nxv2i8_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT:    vror.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.vror.nxv2i8.nxv2i8(
+    <vscale x 2 x i8> undef,
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    iXLen %2)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vror.mask.nxv2i8.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 2 x i8> @intrinsic_vror_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vror_mask_vv_nxv2i8_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT:    vror.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.vror.mask.nxv2i8.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    <vscale x 2 x i8> %2,
+    <vscale x 2 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vror.nxv4i8.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  iXLen)
+
+define <vscale x 4 x i8> @intrinsic_vror_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vror_vv_nxv4i8_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT:    vror.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.vror.nxv4i8.nxv4i8(
+    <vscale x 4 x i8> undef,
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    iXLen %2)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vror.mask.nxv4i8.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 4 x i8> @intrinsic_vror_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vror_mask_vv_nxv4i8_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT:    vror.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.vror.mask.nxv4i8.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    <vscale x 4 x i8> %2,
+    <vscale x 4 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vror.nxv8i8.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  iXLen)
+
+define <vscale x 8 x i8> @intrinsic_vror_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vror_vv_nxv8i8_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT:    vror.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.vror.nxv8i8.nxv8i8(
+    <vscale x 8 x i8> undef,
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    iXLen %2)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vror.mask.nxv8i8.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 8 x i8> @intrinsic_vror_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vror_mask_vv_nxv8i8_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT:    vror.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.vror.mask.nxv8i8.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    <vscale x 8 x i8> %2,
+    <vscale x 8 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vror.nxv16i8.nxv16i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  iXLen)
+
+define <vscale x 16 x i8> @intrinsic_vror_vv_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vror_vv_nxv16i8_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT:    vror.vv v8, v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.vror.nxv16i8.nxv16i8(
+    <vscale x 16 x i8> undef,
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    iXLen %2)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vror.mask.nxv16i8.nxv16i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  <vscale x 16 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 16 x i8> @intrinsic_vror_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vror_mask_vv_nxv16i8_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT:    vror.vv v8, v10, v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.vror.mask.nxv16i8.nxv16i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    <vscale x 16 x i8> %2,
+    <vscale x 16 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vror.nxv32i8.nxv32i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  iXLen)
+
+define <vscale x 32 x i8> @intrinsic_vror_vv_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vror_vv_nxv32i8_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT:    vror.vv v8, v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i8> @llvm.riscv.vror.nxv32i8.nxv32i8(
+    <vscale x 32 x i8> undef,
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8> %1,
+    iXLen %2)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vror.mask.nxv32i8.nxv32i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  <vscale x 32 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 32 x i8> @intrinsic_vror_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vror_mask_vv_nxv32i8_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT:    vror.vv v8, v12, v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i8> @llvm.riscv.vror.mask.nxv32i8.nxv32i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8> %1,
+    <vscale x 32 x i8> %2,
+    <vscale x 32 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vror.nxv64i8.nxv64i8(
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  iXLen)
+
+define <vscale x 64 x i8> @intrinsic_vror_vv_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vror_vv_nxv64i8_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT:    vror.vv v8, v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 64 x i8> @llvm.riscv.vror.nxv64i8.nxv64i8(
+    <vscale x 64 x i8> undef,
+    <vscale x 64 x i8> %0,
+    <vscale x 64 x i8> %1,
+    iXLen %2)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vror.mask.nxv64i8.nxv64i8(
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  <vscale x 64 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 64 x i8> @intrinsic_vror_mask_vv_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vror_mask_vv_nxv64i8_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vl8r.v v24, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
+; CHECK-NEXT:    vror.vv v8, v16, v24, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 64 x i8> @llvm.riscv.vror.mask.nxv64i8.nxv64i8(
+    <vscale x 64 x i8> %0,
+    <vscale x 64 x i8> %1,
+    <vscale x 64 x i8> %2,
+    <vscale x 64 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vror.nxv1i16.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  iXLen)
+
+define <vscale x 1 x i16> @intrinsic_vror_vv_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vror_vv_nxv1i16_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    vror.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.vror.nxv1i16.nxv1i16(
+    <vscale x 1 x i16> undef,
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    iXLen %2)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vror.mask.nxv1i16.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 1 x i16> @intrinsic_vror_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vror_mask_vv_nxv1i16_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT:    vror.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.vror.mask.nxv1i16.nxv1i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    <vscale x 1 x i16> %2,
+    <vscale x 1 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vror.nxv2i16.nxv2i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  iXLen)
+
+define <vscale x 2 x i16> @intrinsic_vror_vv_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vror_vv_nxv2i16_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    vror.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i16> @llvm.riscv.vror.nxv2i16.nxv2i16(
+    <vscale x 2 x i16> undef,
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %1,
+    iXLen %2)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vror.mask.nxv2i16.nxv2i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  <vscale x 2 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 2 x i16> @intrinsic_vror_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vror_mask_vv_nxv2i16_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT:    vror.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i16> @llvm.riscv.vror.mask.nxv2i16.nxv2i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %1,
+    <vscale x 2 x i16> %2,
+    <vscale x 2 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vror.nxv4i16.nxv4i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  iXLen)
+
+define <vscale x 4 x i16> @intrinsic_vror_vv_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vror_vv_nxv4i16_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    vror.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i16> @llvm.riscv.vror.nxv4i16.nxv4i16(
+    <vscale x 4 x i16> undef,
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %1,
+    iXLen %2)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vror.mask.nxv4i16.nxv4i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  <vscale x 4 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 4 x i16> @intrinsic_vror_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vror_mask_vv_nxv4i16_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT:    vror.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i16> @llvm.riscv.vror.mask.nxv4i16.nxv4i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %1,
+    <vscale x 4 x i16> %2,
+    <vscale x 4 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vror.nxv8i16.nxv8i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  iXLen)
+
+define <vscale x 8 x i16> @intrinsic_vror_vv_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vror_vv_nxv8i16_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    vror.vv v8, v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i16> @llvm.riscv.vror.nxv8i16.nxv8i16(
+    <vscale x 8 x i16> undef,
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %1,
+    iXLen %2)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vror.mask.nxv8i16.nxv8i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  <vscale x 8 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 8 x i16> @intrinsic_vror_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vror_mask_vv_nxv8i16_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT:    vror.vv v8, v10, v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i16> @llvm.riscv.vror.mask.nxv8i16.nxv8i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %1,
+    <vscale x 8 x i16> %2,
+    <vscale x 8 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vror.nxv16i16.nxv16i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  iXLen)
+
+define <vscale x 16 x i16> @intrinsic_vror_vv_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vror_vv_nxv16i16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    vror.vv v8, v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i16> @llvm.riscv.vror.nxv16i16.nxv16i16(
+    <vscale x 16 x i16> undef,
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %1,
+    iXLen %2)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vror.mask.nxv16i16.nxv16i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  <vscale x 16 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 16 x i16> @intrinsic_vror_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vror_mask_vv_nxv16i16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT:    vror.vv v8, v12, v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i16> @llvm.riscv.vror.mask.nxv16i16.nxv16i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %1,
+    <vscale x 16 x i16> %2,
+    <vscale x 16 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vror.nxv32i16.nxv32i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  iXLen)
+
+define <vscale x 32 x i16> @intrinsic_vror_vv_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vror_vv_nxv32i16_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT:    vror.vv v8, v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i16> @llvm.riscv.vror.nxv32i16.nxv32i16(
+    <vscale x 32 x i16> undef,
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16> %1,
+    iXLen %2)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vror.mask.nxv32i16.nxv32i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  <vscale x 32 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 32 x i16> @intrinsic_vror_mask_vv_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vror_mask_vv_nxv32i16_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vl8re16.v v24, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
+; CHECK-NEXT:    vror.vv v8, v16, v24, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i16> @llvm.riscv.vror.mask.nxv32i16.nxv32i16(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16> %1,
+    <vscale x 32 x i16> %2,
+    <vscale x 32 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vror.nxv1i32.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  iXLen)
+
+define <vscale x 1 x i32> @intrinsic_vror_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vror_vv_nxv1i32_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    vror.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vror.nxv1i32.nxv1i32(
+    <vscale x 1 x i32> undef,
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    iXLen %2)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vror.mask.nxv1i32.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 1 x i32> @intrinsic_vror_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vror_mask_vv_nxv1i32_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT:    vror.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vror.mask.nxv1i32.nxv1i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    <vscale x 1 x i32> %2,
+    <vscale x 1 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vror.nxv2i32.nxv2i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  iXLen)
+
+define <vscale x 2 x i32> @intrinsic_vror_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vror_vv_nxv2i32_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    vror.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vror.nxv2i32.nxv2i32(
+    <vscale x 2 x i32> undef,
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    iXLen %2)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vror.mask.nxv2i32.nxv2i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  <vscale x 2 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 2 x i32> @intrinsic_vror_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vror_mask_vv_nxv2i32_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT:    vror.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vror.mask.nxv2i32.nxv2i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    <vscale x 2 x i32> %2,
+    <vscale x 2 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vror.nxv4i32.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  iXLen)
+
+define <vscale x 4 x i32> @intrinsic_vror_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vror_vv_nxv4i32_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    vror.vv v8, v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vror.nxv4i32.nxv4i32(
+    <vscale x 4 x i32> undef,
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    iXLen %2)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vror.mask.nxv4i32.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  <vscale x 4 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 4 x i32> @intrinsic_vror_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vror_mask_vv_nxv4i32_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT:    vror.vv v8, v10, v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vror.mask.nxv4i32.nxv4i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    <vscale x 4 x i32> %2,
+    <vscale x 4 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vror.nxv8i32.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  iXLen)
+
+define <vscale x 8 x i32> @intrinsic_vror_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vror_vv_nxv8i32_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    vror.vv v8, v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vror.nxv8i32.nxv8i32(
+    <vscale x 8 x i32> undef,
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    iXLen %2)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vror.mask.nxv8i32.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  <vscale x 8 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 8 x i32> @intrinsic_vror_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vror_mask_vv_nxv8i32_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT:    vror.vv v8, v12, v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vror.mask.nxv8i32.nxv8i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    <vscale x 8 x i32> %2,
+    <vscale x 8 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vror.nxv16i32.nxv16i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  iXLen)
+
+define <vscale x 16 x i32> @intrinsic_vror_vv_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vror_vv_nxv16i32_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT:    vror.vv v8, v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vror.nxv16i32.nxv16i32(
+    <vscale x 16 x i32> undef,
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    iXLen %2)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vror.mask.nxv16i32.nxv16i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  <vscale x 16 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 16 x i32> @intrinsic_vror_mask_vv_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vror_mask_vv_nxv16i32_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vl8re32.v v24, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
+; CHECK-NEXT:    vror.vv v8, v16, v24, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vror.mask.nxv16i32.nxv16i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    <vscale x 16 x i32> %2,
+    <vscale x 16 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vror.nxv1i64.nxv1i64(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>,
+  iXLen)
+
+define <vscale x 1 x i64> @intrinsic_vror_vv_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vror_vv_nxv1i64_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    vror.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vror.nxv1i64.nxv1i64(
+    <vscale x 1 x i64> undef,
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64> %1,
+    iXLen %2)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vror.mask.nxv1i64.nxv1i64(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 1 x i64> @intrinsic_vror_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vror_mask_vv_nxv1i64_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT:    vror.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vror.mask.nxv1i64.nxv1i64(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64> %1,
+    <vscale x 1 x i64> %2,
+    <vscale x 1 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vror.nxv2i64.nxv2i64(
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>,
+  iXLen)
+
+define <vscale x 2 x i64> @intrinsic_vror_vv_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vror_vv_nxv2i64_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    vror.vv v8, v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i64> @llvm.riscv.vror.nxv2i64.nxv2i64(
+    <vscale x 2 x i64> undef,
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64> %1,
+    iXLen %2)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vror.mask.nxv2i64.nxv2i64(
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>,
+  <vscale x 2 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 2 x i64> @intrinsic_vror_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vror_mask_vv_nxv2i64_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT:    vror.vv v8, v10, v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i64> @llvm.riscv.vror.mask.nxv2i64.nxv2i64(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64> %1,
+    <vscale x 2 x i64> %2,
+    <vscale x 2 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vror.nxv4i64.nxv4i64(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  iXLen)
+
+define <vscale x 4 x i64> @intrinsic_vror_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vror_vv_nxv4i64_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT:    vror.vv v8, v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i64> @llvm.riscv.vror.nxv4i64.nxv4i64(
+    <vscale x 4 x i64> undef,
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64> %1,
+    iXLen %2)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vror.mask.nxv4i64.nxv4i64(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  <vscale x 4 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 4 x i64> @intrinsic_vror_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vror_mask_vv_nxv4i64_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT:    vror.vv v8, v12, v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i64> @llvm.riscv.vror.mask.nxv4i64.nxv4i64(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64> %1,
+    <vscale x 4 x i64> %2,
+    <vscale x 4 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vror.nxv8i64.nxv8i64(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>,
+  iXLen)
+
+define <vscale x 8 x i64> @intrinsic_vror_vv_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vror_vv_nxv8i64_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT:    vror.vv v8, v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vror.nxv8i64.nxv8i64(
+    <vscale x 8 x i64> undef,
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i64> %1,
+    iXLen %2)
+
+  ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vror.mask.nxv8i64.nxv8i64(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>,
+  <vscale x 8 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 8 x i64> @intrinsic_vror_mask_vv_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vror_mask_vv_nxv8i64_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vl8re64.v v24, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT:    vror.vv v8, v16, v24, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vror.mask.nxv8i64.nxv8i64(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i64> %1,
+    <vscale x 8 x i64> %2,
+    <vscale x 8 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vror.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  iXLen,
+  iXLen)
+
+define <vscale x 1 x i8> @intrinsic_vror_vx_nxv1i8(<vscale x 1 x i8> %0, iXLen %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vror_vx_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT:    vror.vx v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vror.nxv1i8(
+    <vscale x 1 x i8> undef,
+    <vscale x 1 x i8> %0,
+    iXLen %1,
+    iXLen %2)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vror.mask.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  iXLen,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 1 x i8> @intrinsic_vror_mask_vx_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vror_mask_vx_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT:    vror.vx v8, v9, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vror.mask.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    iXLen %2,
+    <vscale x 1 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vror.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  iXLen,
+  iXLen)
+
+define <vscale x 2 x i8> @intrinsic_vror_vx_nxv2i8(<vscale x 2 x i8> %0, iXLen %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vror_vx_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT:    vror.vx v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.vror.nxv2i8(
+    <vscale x 2 x i8> undef,
+    <vscale x 2 x i8> %0,
+    iXLen %1,
+    iXLen %2)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vror.mask.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  iXLen,
+  <vscale x 2 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 2 x i8> @intrinsic_vror_mask_vx_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vror_mask_vx_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT:    vror.vx v8, v9, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.vror.mask.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    iXLen %2,
+    <vscale x 2 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vror.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  iXLen,
+  iXLen)
+
+define <vscale x 4 x i8> @intrinsic_vror_vx_nxv4i8(<vscale x 4 x i8> %0, iXLen %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vror_vx_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT:    vror.vx v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.vror.nxv4i8(
+    <vscale x 4 x i8> undef,
+    <vscale x 4 x i8> %0,
+    iXLen %1,
+    iXLen %2)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vror.mask.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  iXLen,
+  <vscale x 4 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 4 x i8> @intrinsic_vror_mask_vx_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vror_mask_vx_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT:    vror.vx v8, v9, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.vror.mask.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    iXLen %2,
+    <vscale x 4 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vror.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  iXLen,
+  iXLen)
+
+define <vscale x 8 x i8> @intrinsic_vror_vx_nxv8i8(<vscale x 8 x i8> %0, iXLen %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vror_vx_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT:    vror.vx v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.vror.nxv8i8(
+    <vscale x 8 x i8> undef,
+    <vscale x 8 x i8> %0,
+    iXLen %1,
+    iXLen %2)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vror.mask.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  iXLen,
+  <vscale x 8 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 8 x i8> @intrinsic_vror_mask_vx_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vror_mask_vx_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT:    vror.vx v8, v9, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.vror.mask.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    iXLen %2,
+    <vscale x 8 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vror.nxv16i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  iXLen,
+  iXLen)
+
+define <vscale x 16 x i8> @intrinsic_vror_vx_nxv16i8(<vscale x 16 x i8> %0, iXLen %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vror_vx_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT:    vror.vx v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.vror.nxv16i8(
+    <vscale x 16 x i8> undef,
+    <vscale x 16 x i8> %0,
+    iXLen %1,
+    iXLen %2)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vror.mask.nxv16i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  iXLen,
+  <vscale x 16 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 16 x i8> @intrinsic_vror_mask_vx_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vror_mask_vx_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
+; CHECK-NEXT:    vror.vx v8, v10, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.vror.mask.nxv16i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    iXLen %2,
+    <vscale x 16 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vror.nxv32i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  iXLen,
+  iXLen)
+
+define <vscale x 32 x i8> @intrinsic_vror_vx_nxv32i8(<vscale x 32 x i8> %0, iXLen %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vror_vx_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
+; CHECK-NEXT:    vror.vx v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i8> @llvm.riscv.vror.nxv32i8(
+    <vscale x 32 x i8> undef,
+    <vscale x 32 x i8> %0,
+    iXLen %1,
+    iXLen %2)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vror.mask.nxv32i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  iXLen,
+  <vscale x 32 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 32 x i8> @intrinsic_vror_mask_vx_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vror_mask_vx_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
+; CHECK-NEXT:    vror.vx v8, v12, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i8> @llvm.riscv.vror.mask.nxv32i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8> %1,
+    iXLen %2,
+    <vscale x 32 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vror.nxv64i8(
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  iXLen,
+  iXLen)
+
+define <vscale x 64 x i8> @intrinsic_vror_vx_nxv64i8(<vscale x 64 x i8> %0, iXLen %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vror_vx_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT:    vror.vx v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 64 x i8> @llvm.riscv.vror.nxv64i8(
+    <vscale x 64 x i8> undef,
+    <vscale x 64 x i8> %0,
+    iXLen %1,
+    iXLen %2)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vror.mask.nxv64i8(
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  iXLen,
+  <vscale x 64 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 64 x i8> @intrinsic_vror_mask_vx_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, iXLen %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vror_mask_vx_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
+; CHECK-NEXT:    vror.vx v8, v16, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 64 x i8> @llvm.riscv.vror.mask.nxv64i8(
+    <vscale x 64 x i8> %0,
+    <vscale x 64 x i8> %1,
+    iXLen %2,
+    <vscale x 64 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vror.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  iXLen,
+  iXLen)
+
+define <vscale x 1 x i16> @intrinsic_vror_vx_nxv1i16(<vscale x 1 x i16> %0, iXLen %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vror_vx_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT:    vror.vx v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.vror.nxv1i16(
+    <vscale x 1 x i16> undef,
+    <vscale x 1 x i16> %0,
+    iXLen %1,
+    iXLen %2)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vror.mask.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  iXLen,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 1 x i16> @intrinsic_vror_mask_vx_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vror_mask_vx_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT:    vror.vx v8, v9, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.vror.mask.nxv1i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    iXLen %2,
+    <vscale x 1 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vror.nxv2i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  iXLen,
+  iXLen)
+
+define <vscale x 2 x i16> @intrinsic_vror_vx_nxv2i16(<vscale x 2 x i16> %0, iXLen %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vror_vx_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT:    vror.vx v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i16> @llvm.riscv.vror.nxv2i16(
+    <vscale x 2 x i16> undef,
+    <vscale x 2 x i16> %0,
+    iXLen %1,
+    iXLen %2)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vror.mask.nxv2i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  iXLen,
+  <vscale x 2 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 2 x i16> @intrinsic_vror_mask_vx_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vror_mask_vx_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
+; CHECK-NEXT:    vror.vx v8, v9, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i16> @llvm.riscv.vror.mask.nxv2i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %1,
+    iXLen %2,
+    <vscale x 2 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vror.nxv4i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  iXLen,
+  iXLen)
+
+define <vscale x 4 x i16> @intrinsic_vror_vx_nxv4i16(<vscale x 4 x i16> %0, iXLen %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vror_vx_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT:    vror.vx v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i16> @llvm.riscv.vror.nxv4i16(
+    <vscale x 4 x i16> undef,
+    <vscale x 4 x i16> %0,
+    iXLen %1,
+    iXLen %2)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vror.mask.nxv4i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  iXLen,
+  <vscale x 4 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 4 x i16> @intrinsic_vror_mask_vx_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vror_mask_vx_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT:    vror.vx v8, v9, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i16> @llvm.riscv.vror.mask.nxv4i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %1,
+    iXLen %2,
+    <vscale x 4 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vror.nxv8i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  iXLen,
+  iXLen)
+
+define <vscale x 8 x i16> @intrinsic_vror_vx_nxv8i16(<vscale x 8 x i16> %0, iXLen %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vror_vx_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT:    vror.vx v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i16> @llvm.riscv.vror.nxv8i16(
+    <vscale x 8 x i16> undef,
+    <vscale x 8 x i16> %0,
+    iXLen %1,
+    iXLen %2)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vror.mask.nxv8i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  iXLen,
+  <vscale x 8 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 8 x i16> @intrinsic_vror_mask_vx_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vror_mask_vx_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
+; CHECK-NEXT:    vror.vx v8, v10, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i16> @llvm.riscv.vror.mask.nxv8i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %1,
+    iXLen %2,
+    <vscale x 8 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vror.nxv16i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  iXLen,
+  iXLen)
+
+define <vscale x 16 x i16> @intrinsic_vror_vx_nxv16i16(<vscale x 16 x i16> %0, iXLen %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vror_vx_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT:    vror.vx v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i16> @llvm.riscv.vror.nxv16i16(
+    <vscale x 16 x i16> undef,
+    <vscale x 16 x i16> %0,
+    iXLen %1,
+    iXLen %2)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vror.mask.nxv16i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  iXLen,
+  <vscale x 16 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 16 x i16> @intrinsic_vror_mask_vx_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vror_mask_vx_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
+; CHECK-NEXT:    vror.vx v8, v12, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i16> @llvm.riscv.vror.mask.nxv16i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %1,
+    iXLen %2,
+    <vscale x 16 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vror.nxv32i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  iXLen,
+  iXLen)
+
+define <vscale x 32 x i16> @intrinsic_vror_vx_nxv32i16(<vscale x 32 x i16> %0, iXLen %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vror_vx_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT:    vror.vx v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i16> @llvm.riscv.vror.nxv32i16(
+    <vscale x 32 x i16> undef,
+    <vscale x 32 x i16> %0,
+    iXLen %1,
+    iXLen %2)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vror.mask.nxv32i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  iXLen,
+  <vscale x 32 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 32 x i16> @intrinsic_vror_mask_vx_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, iXLen %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vror_mask_vx_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
+; CHECK-NEXT:    vror.vx v8, v16, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i16> @llvm.riscv.vror.mask.nxv32i16(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16> %1,
+    iXLen %2,
+    <vscale x 32 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vror.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  iXLen,
+  iXLen)
+
+define <vscale x 1 x i32> @intrinsic_vror_vx_nxv1i32(<vscale x 1 x i32> %0, iXLen %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vror_vx_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT:    vror.vx v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vror.nxv1i32(
+    <vscale x 1 x i32> undef,
+    <vscale x 1 x i32> %0,
+    iXLen %1,
+    iXLen %2)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vror.mask.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  iXLen,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 1 x i32> @intrinsic_vror_mask_vx_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vror_mask_vx_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
+; CHECK-NEXT:    vror.vx v8, v9, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vror.mask.nxv1i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    iXLen %2,
+    <vscale x 1 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vror.nxv2i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  iXLen,
+  iXLen)
+
+define <vscale x 2 x i32> @intrinsic_vror_vx_nxv2i32(<vscale x 2 x i32> %0, iXLen %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vror_vx_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT:    vror.vx v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vror.nxv2i32(
+    <vscale x 2 x i32> undef,
+    <vscale x 2 x i32> %0,
+    iXLen %1,
+    iXLen %2)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vror.mask.nxv2i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  iXLen,
+  <vscale x 2 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 2 x i32> @intrinsic_vror_mask_vx_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vror_mask_vx_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT:    vror.vx v8, v9, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vror.mask.nxv2i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    iXLen %2,
+    <vscale x 2 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vror.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  iXLen,
+  iXLen)
+
+define <vscale x 4 x i32> @intrinsic_vror_vx_nxv4i32(<vscale x 4 x i32> %0, iXLen %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vror_vx_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT:    vror.vx v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vror.nxv4i32(
+    <vscale x 4 x i32> undef,
+    <vscale x 4 x i32> %0,
+    iXLen %1,
+    iXLen %2)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vror.mask.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  iXLen,
+  <vscale x 4 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 4 x i32> @intrinsic_vror_mask_vx_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vror_mask_vx_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
+; CHECK-NEXT:    vror.vx v8, v10, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vror.mask.nxv4i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    iXLen %2,
+    <vscale x 4 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vror.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  iXLen,
+  iXLen)
+
+define <vscale x 8 x i32> @intrinsic_vror_vx_nxv8i32(<vscale x 8 x i32> %0, iXLen %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vror_vx_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT:    vror.vx v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vror.nxv8i32(
+    <vscale x 8 x i32> undef,
+    <vscale x 8 x i32> %0,
+    iXLen %1,
+    iXLen %2)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vror.mask.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  iXLen,
+  <vscale x 8 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 8 x i32> @intrinsic_vror_mask_vx_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vror_mask_vx_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
+; CHECK-NEXT:    vror.vx v8, v12, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vror.mask.nxv8i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    iXLen %2,
+    <vscale x 8 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vror.nxv16i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  iXLen,
+  iXLen)
+
+define <vscale x 16 x i32> @intrinsic_vror_vx_nxv16i32(<vscale x 16 x i32> %0, iXLen %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vror_vx_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT:    vror.vx v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vror.nxv16i32(
+    <vscale x 16 x i32> undef,
+    <vscale x 16 x i32> %0,
+    iXLen %1,
+    iXLen %2)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vror.mask.nxv16i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  iXLen,
+  <vscale x 16 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 16 x i32> @intrinsic_vror_mask_vx_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vror_mask_vx_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
+; CHECK-NEXT:    vror.vx v8, v16, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vror.mask.nxv16i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    iXLen %2,
+    <vscale x 16 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vror.nxv1i64(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>,
+  iXLen,
+  iXLen)
+
+define <vscale x 1 x i64> @intrinsic_vror_vx_nxv1i64(<vscale x 1 x i64> %0, iXLen %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vror_vx_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT:    vror.vx v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vror.nxv1i64(
+    <vscale x 1 x i64> undef,
+    <vscale x 1 x i64> %0,
+    iXLen %1,
+    iXLen %2)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vror.mask.nxv1i64(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>,
+  iXLen,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 1 x i64> @intrinsic_vror_mask_vx_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vror_mask_vx_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
+; CHECK-NEXT:    vror.vx v8, v9, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vror.mask.nxv1i64(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64> %1,
+    iXLen %2,
+    <vscale x 1 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vror.nxv2i64(
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>,
+  iXLen,
+  iXLen)
+
+define <vscale x 2 x i64> @intrinsic_vror_vx_nxv2i64(<vscale x 2 x i64> %0, iXLen %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vror_vx_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT:    vror.vx v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i64> @llvm.riscv.vror.nxv2i64(
+    <vscale x 2 x i64> undef,
+    <vscale x 2 x i64> %0,
+    iXLen %1,
+    iXLen %2)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vror.mask.nxv2i64(
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>,
+  iXLen,
+  <vscale x 2 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 2 x i64> @intrinsic_vror_mask_vx_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vror_mask_vx_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
+; CHECK-NEXT:    vror.vx v8, v10, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i64> @llvm.riscv.vror.mask.nxv2i64(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64> %1,
+    iXLen %2,
+    <vscale x 2 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vror.nxv4i64(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  iXLen,
+  iXLen)
+
+define <vscale x 4 x i64> @intrinsic_vror_vx_nxv4i64(<vscale x 4 x i64> %0, iXLen %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vror_vx_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT:    vror.vx v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i64> @llvm.riscv.vror.nxv4i64(
+    <vscale x 4 x i64> undef,
+    <vscale x 4 x i64> %0,
+    iXLen %1,
+    iXLen %2)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vror.mask.nxv4i64(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  iXLen,
+  <vscale x 4 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 4 x i64> @intrinsic_vror_mask_vx_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vror_mask_vx_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
+; CHECK-NEXT:    vror.vx v8, v12, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i64> @llvm.riscv.vror.mask.nxv4i64(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64> %1,
+    iXLen %2,
+    <vscale x 4 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vror.nxv8i64(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>,
+  iXLen,
+  iXLen)
+
+define <vscale x 8 x i64> @intrinsic_vror_vx_nxv8i64(<vscale x 8 x i64> %0, iXLen %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vror_vx_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT:    vror.vx v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vror.nxv8i64(
+    <vscale x 8 x i64> undef,
+    <vscale x 8 x i64> %0,
+    iXLen %1,
+    iXLen %2)
+
+  ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vror.mask.nxv8i64(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>,
+  iXLen,
+  <vscale x 8 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 8 x i64> @intrinsic_vror_mask_vx_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vror_mask_vx_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT:    vror.vx v8, v16, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vror.mask.nxv8i64(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i64> %1,
+    iXLen %2,
+    <vscale x 8 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 8 x i64> %a
+}
+
+define <vscale x 1 x i8> @intrinsic_vror_vi_nxv1i8(<vscale x 1 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vror_vi_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT:    vror.vi v8, v8, 2
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vror.nxv1i8(
+    <vscale x 1 x i8> undef,
+    <vscale x 1 x i8> %0,
+    iXLen 2,
+    iXLen %1)
+
+  ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 1 x i8> @intrinsic_vror_mask_vi_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vror_mask_vi_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT:    vror.vi v8, v9, 2, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vror.mask.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    iXLen 2,
+    <vscale x 1 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_vror_vi_nxv2i8(<vscale x 2 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vror_vi_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT:    vror.vi v8, v8, 2
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.vror.nxv2i8(
+    <vscale x 2 x i8> undef,
+    <vscale x 2 x i8> %0,
+    iXLen 2,
+    iXLen %1)
+
+  ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_vror_mask_vi_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vror_mask_vi_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT:    vror.vi v8, v9, 2, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.vror.mask.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    iXLen 2,
+    <vscale x 2 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_vror_vi_nxv4i8(<vscale x 4 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vror_vi_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT:    vror.vi v8, v8, 2
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.vror.nxv4i8(
+    <vscale x 4 x i8> undef,
+    <vscale x 4 x i8> %0,
+    iXLen 2,
+    iXLen %1)
+
+  ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_vror_mask_vi_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vror_mask_vi_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT:    vror.vi v8, v9, 2, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.vror.mask.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    iXLen 2,
+    <vscale x 4 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vror_vi_nxv8i8(<vscale x 8 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vror_vi_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT:    vror.vi v8, v8, 2
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.vror.nxv8i8(
+    <vscale x 8 x i8> undef,
+    <vscale x 8 x i8> %0,
+    iXLen 2,
+    iXLen %1)
+
+  ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vror_mask_vi_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vror_mask_vi_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT:    vror.vi v8, v9, 2, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.vror.mask.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    iXLen 2,
+    <vscale x 8 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_vror_vi_nxv16i8(<vscale x 16 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vror_vi_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT:    vror.vi v8, v8, 2
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.vror.nxv16i8(
+    <vscale x 16 x i8> undef,
+    <vscale x 16 x i8> %0,
+    iXLen 2,
+    iXLen %1)
+
+  ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_vror_mask_vi_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vror_mask_vi_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT:    vror.vi v8, v10, 2, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.vror.mask.nxv16i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    iXLen 2,
+    <vscale x 16 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 32 x i8> @intrinsic_vror_vi_nxv32i8(<vscale x 32 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vror_vi_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT:    vror.vi v8, v8, 2
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i8> @llvm.riscv.vror.nxv32i8(
+    <vscale x 32 x i8> undef,
+    <vscale x 32 x i8> %0,
+    iXLen 2,
+    iXLen %1)
+
+  ret <vscale x 32 x i8> %a
+}
+
+define <vscale x 32 x i8> @intrinsic_vror_mask_vi_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vror_mask_vi_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT:    vror.vi v8, v12, 2, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i8> @llvm.riscv.vror.mask.nxv32i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8> %1,
+    iXLen 2,
+    <vscale x 32 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 32 x i8> %a
+}
+
+define <vscale x 64 x i8> @intrinsic_vror_vi_nxv64i8(<vscale x 64 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vror_vi_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT:    vror.vi v8, v8, 2
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 64 x i8> @llvm.riscv.vror.nxv64i8(
+    <vscale x 64 x i8> undef,
+    <vscale x 64 x i8> %0,
+    iXLen 2,
+    iXLen %1)
+
+  ret <vscale x 64 x i8> %a
+}
+
+define <vscale x 64 x i8> @intrinsic_vror_mask_vi_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vror_mask_vi_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
+; CHECK-NEXT:    vror.vi v8, v16, 2, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 64 x i8> @llvm.riscv.vror.mask.nxv64i8(
+    <vscale x 64 x i8> %0,
+    <vscale x 64 x i8> %1,
+    iXLen 2,
+    <vscale x 64 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 64 x i8> %a
+}
+
+define <vscale x 1 x i16> @intrinsic_vror_vi_nxv1i16(<vscale x 1 x i16> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vror_vi_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    vror.vi v8, v8, 2
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.vror.nxv1i16(
+    <vscale x 1 x i16> undef,
+    <vscale x 1 x i16> %0,
+    iXLen 2,
+    iXLen %1)
+
+  ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 1 x i16> @intrinsic_vror_mask_vi_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vror_mask_vi_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT:    vror.vi v8, v9, 2, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.vror.mask.nxv1i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    iXLen 2,
+    <vscale x 1 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 2 x i16> @intrinsic_vror_vi_nxv2i16(<vscale x 2 x i16> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vror_vi_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    vror.vi v8, v8, 2
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i16> @llvm.riscv.vror.nxv2i16(
+    <vscale x 2 x i16> undef,
+    <vscale x 2 x i16> %0,
+    iXLen 2,
+    iXLen %1)
+
+  ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 2 x i16> @intrinsic_vror_mask_vi_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vror_mask_vi_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT:    vror.vi v8, v9, 2, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i16> @llvm.riscv.vror.mask.nxv2i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %1,
+    iXLen 2,
+    <vscale x 2 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 4 x i16> @intrinsic_vror_vi_nxv4i16(<vscale x 4 x i16> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vror_vi_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    vror.vi v8, v8, 2
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i16> @llvm.riscv.vror.nxv4i16(
+    <vscale x 4 x i16> undef,
+    <vscale x 4 x i16> %0,
+    iXLen 2,
+    iXLen %1)
+
+  ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 4 x i16> @intrinsic_vror_mask_vi_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vror_mask_vi_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT:    vror.vi v8, v9, 2, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i16> @llvm.riscv.vror.mask.nxv4i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %1,
+    iXLen 2,
+    <vscale x 4 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 8 x i16> @intrinsic_vror_vi_nxv8i16(<vscale x 8 x i16> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vror_vi_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    vror.vi v8, v8, 2
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i16> @llvm.riscv.vror.nxv8i16(
+    <vscale x 8 x i16> undef,
+    <vscale x 8 x i16> %0,
+    iXLen 2,
+    iXLen %1)
+
+  ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 8 x i16> @intrinsic_vror_mask_vi_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vror_mask_vi_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT:    vror.vi v8, v10, 2, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i16> @llvm.riscv.vror.mask.nxv8i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %1,
+    iXLen 2,
+    <vscale x 8 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 16 x i16> @intrinsic_vror_vi_nxv16i16(<vscale x 16 x i16> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vror_vi_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    vror.vi v8, v8, 2
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i16> @llvm.riscv.vror.nxv16i16(
+    <vscale x 16 x i16> undef,
+    <vscale x 16 x i16> %0,
+    iXLen 2,
+    iXLen %1)
+
+  ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 16 x i16> @intrinsic_vror_mask_vi_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vror_mask_vi_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT:    vror.vi v8, v12, 2, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i16> @llvm.riscv.vror.mask.nxv16i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %1,
+    iXLen 2,
+    <vscale x 16 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 32 x i16> @intrinsic_vror_vi_nxv32i16(<vscale x 32 x i16> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vror_vi_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT:    vror.vi v8, v8, 2
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i16> @llvm.riscv.vror.nxv32i16(
+    <vscale x 32 x i16> undef,
+    <vscale x 32 x i16> %0,
+    iXLen 2,
+    iXLen %1)
+
+  ret <vscale x 32 x i16> %a
+}
+
+define <vscale x 32 x i16> @intrinsic_vror_mask_vi_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vror_mask_vi_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT:    vror.vi v8, v16, 2, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i16> @llvm.riscv.vror.mask.nxv32i16(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16> %1,
+    iXLen 2,
+    <vscale x 32 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 32 x i16> %a
+}
+
+define <vscale x 1 x i32> @intrinsic_vror_vi_nxv1i32(<vscale x 1 x i32> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vror_vi_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    vror.vi v8, v8, 2
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vror.nxv1i32(
+    <vscale x 1 x i32> undef,
+    <vscale x 1 x i32> %0,
+    iXLen 2,
+    iXLen %1)
+
+  ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 1 x i32> @intrinsic_vror_mask_vi_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vror_mask_vi_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT:    vror.vi v8, v9, 2, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vror.mask.nxv1i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    iXLen 2,
+    <vscale x 1 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 2 x i32> @intrinsic_vror_vi_nxv2i32(<vscale x 2 x i32> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vror_vi_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    vror.vi v8, v8, 2
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vror.nxv2i32(
+    <vscale x 2 x i32> undef,
+    <vscale x 2 x i32> %0,
+    iXLen 2,
+    iXLen %1)
+
+  ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 2 x i32> @intrinsic_vror_mask_vi_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vror_mask_vi_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT:    vror.vi v8, v9, 2, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vror.mask.nxv2i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    iXLen 2,
+    <vscale x 2 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 4 x i32> @intrinsic_vror_vi_nxv4i32(<vscale x 4 x i32> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vror_vi_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    vror.vi v8, v8, 2
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vror.nxv4i32(
+    <vscale x 4 x i32> undef,
+    <vscale x 4 x i32> %0,
+    iXLen 2,
+    iXLen %1)
+
+  ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 4 x i32> @intrinsic_vror_mask_vi_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vror_mask_vi_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT:    vror.vi v8, v10, 2, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vror.mask.nxv4i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    iXLen 2,
+    <vscale x 4 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 8 x i32> @intrinsic_vror_vi_nxv8i32(<vscale x 8 x i32> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vror_vi_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    vror.vi v8, v8, 2
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vror.nxv8i32(
+    <vscale x 8 x i32> undef,
+    <vscale x 8 x i32> %0,
+    iXLen 2,
+    iXLen %1)
+
+  ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 8 x i32> @intrinsic_vror_mask_vi_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vror_mask_vi_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT:    vror.vi v8, v12, 2, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vror.mask.nxv8i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    iXLen 2,
+    <vscale x 8 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 16 x i32> @intrinsic_vror_vi_nxv16i32(<vscale x 16 x i32> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vror_vi_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT:    vror.vi v8, v8, 2
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vror.nxv16i32(
+    <vscale x 16 x i32> undef,
+    <vscale x 16 x i32> %0,
+    iXLen 2,
+    iXLen %1)
+
+  ret <vscale x 16 x i32> %a
+}
+
+define <vscale x 16 x i32> @intrinsic_vror_mask_vi_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vror_mask_vi_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT:    vror.vi v8, v16, 2, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vror.mask.nxv16i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    iXLen 2,
+    <vscale x 16 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 16 x i32> %a
+}
+
+define <vscale x 1 x i64> @intrinsic_vror_vi_nxv1i64(<vscale x 1 x i64> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vror_vi_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    vror.vi v8, v8, 2
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vror.nxv1i64(
+    <vscale x 1 x i64> undef,
+    <vscale x 1 x i64> %0,
+    iXLen 2,
+    iXLen %1)
+
+  ret <vscale x 1 x i64> %a
+}
+
+define <vscale x 1 x i64> @intrinsic_vror_mask_vi_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vror_mask_vi_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT:    vror.vi v8, v9, 2, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vror.mask.nxv1i64(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64> %1,
+    iXLen 2,
+    <vscale x 1 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 1 x i64> %a
+}
+
+define <vscale x 2 x i64> @intrinsic_vror_vi_nxv2i64(<vscale x 2 x i64> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vror_vi_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    vror.vi v8, v8, 2
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i64> @llvm.riscv.vror.nxv2i64(
+    <vscale x 2 x i64> undef,
+    <vscale x 2 x i64> %0,
+    iXLen 2,
+    iXLen %1)
+
+  ret <vscale x 2 x i64> %a
+}
+
+define <vscale x 2 x i64> @intrinsic_vror_mask_vi_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vror_mask_vi_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT:    vror.vi v8, v10, 2, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i64> @llvm.riscv.vror.mask.nxv2i64(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64> %1,
+    iXLen 2,
+    <vscale x 2 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 2 x i64> %a
+}
+
+define <vscale x 4 x i64> @intrinsic_vror_vi_nxv4i64(<vscale x 4 x i64> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vror_vi_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT:    vror.vi v8, v8, 2
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i64> @llvm.riscv.vror.nxv4i64(
+    <vscale x 4 x i64> undef,
+    <vscale x 4 x i64> %0,
+    iXLen 2,
+    iXLen %1)
+
+  ret <vscale x 4 x i64> %a
+}
+
+define <vscale x 4 x i64> @intrinsic_vror_mask_vi_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vror_mask_vi_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT:    vror.vi v8, v12, 2, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i64> @llvm.riscv.vror.mask.nxv4i64(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64> %1,
+    iXLen 2,
+    <vscale x 4 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 4 x i64> %a
+}
+
+define <vscale x 8 x i64> @intrinsic_vror_vi_nxv8i64(<vscale x 8 x i64> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vror_vi_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT:    vror.vi v8, v8, 2
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vror.nxv8i64(
+    <vscale x 8 x i64> undef,
+    <vscale x 8 x i64> %0,
+    iXLen 2,
+    iXLen %1)
+
+  ret <vscale x 8 x i64> %a
+}
+
+define <vscale x 8 x i64> @intrinsic_vror_mask_vi_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vror_mask_vi_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT:    vror.vi v8, v16, 2, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vror.mask.nxv8i64(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i64> %1,
+    iXLen 2,
+    <vscale x 8 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 8 x i64> %a
+}

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vsha2ch.ll b/llvm/test/CodeGen/RISCV/rvv/vsha2ch.ll
new file mode 100644
index 00000000000000..52c43f9713a608
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vsha2ch.ll
@@ -0,0 +1,127 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvknhb \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvknhb \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+
+declare <vscale x 4 x i32> @llvm.riscv.vsha2ch.nxv4i32.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  iXLen,
+  iXLen)
+
+define <vscale x 4 x i32> @intrinsic_vsha2ch_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsha2ch_vv_nxv4i32_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    vsha2ch.vv v8, v10, v12
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vsha2ch.nxv4i32.nxv4i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    <vscale x 4 x i32> %2,
+    iXLen %3,
+    iXLen 2)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vsha2ch.nxv8i32.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  iXLen,
+  iXLen)
+
+define <vscale x 8 x i32> @intrinsic_vsha2ch_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsha2ch_vv_nxv8i32_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    vsha2ch.vv v8, v12, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vsha2ch.nxv8i32.nxv8i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    <vscale x 8 x i32> %2,
+    iXLen %3,
+    iXLen 2)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vsha2ch.nxv16i32.nxv16i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  iXLen,
+  iXLen)
+
+define <vscale x 16 x i32> @intrinsic_vsha2ch_vv_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsha2ch_vv_nxv16i32_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vl8re32.v v24, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, ma
+; CHECK-NEXT:    vsha2ch.vv v8, v16, v24
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vsha2ch.nxv16i32.nxv16i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    <vscale x 16 x i32> %2,
+    iXLen %3,
+    iXLen 2)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vsha2ch.nxv4i64.nxv4i64(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  iXLen,
+  iXLen)
+
+define <vscale x 4 x i64> @intrinsic_vsha2ch_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsha2ch_vv_nxv4i64_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
+; CHECK-NEXT:    vsha2ch.vv v8, v12, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i64> @llvm.riscv.vsha2ch.nxv4i64.nxv4i64(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64> %1,
+    <vscale x 4 x i64> %2,
+    iXLen %3,
+    iXLen 2)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vsha2ch.nxv8i64.nxv8i64(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>,
+  iXLen,
+  iXLen)
+
+define <vscale x 8 x i64> @intrinsic_vsha2ch_vv_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsha2ch_vv_nxv8i64_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vl8re64.v v24, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, ma
+; CHECK-NEXT:    vsha2ch.vv v8, v16, v24
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vsha2ch.nxv8i64.nxv8i64(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i64> %1,
+    <vscale x 8 x i64> %2,
+    iXLen %3,
+    iXLen 2)
+
+  ret <vscale x 8 x i64> %a
+}

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vsha2cl.ll b/llvm/test/CodeGen/RISCV/rvv/vsha2cl.ll
new file mode 100644
index 00000000000000..49ae2858e1461b
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vsha2cl.ll
@@ -0,0 +1,127 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvknhb \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvknhb \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+
+declare <vscale x 4 x i32> @llvm.riscv.vsha2cl.nxv4i32.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  iXLen,
+  iXLen)
+
+define <vscale x 4 x i32> @intrinsic_vsha2cl_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsha2cl_vv_nxv4i32_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    vsha2cl.vv v8, v10, v12
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vsha2cl.nxv4i32.nxv4i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    <vscale x 4 x i32> %2,
+    iXLen %3,
+    iXLen 2)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vsha2cl.nxv8i32.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  iXLen,
+  iXLen)
+
+define <vscale x 8 x i32> @intrinsic_vsha2cl_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsha2cl_vv_nxv8i32_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    vsha2cl.vv v8, v12, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vsha2cl.nxv8i32.nxv8i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    <vscale x 8 x i32> %2,
+    iXLen %3,
+    iXLen 2)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vsha2cl.nxv16i32.nxv16i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  iXLen,
+  iXLen)
+
+define <vscale x 16 x i32> @intrinsic_vsha2cl_vv_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsha2cl_vv_nxv16i32_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vl8re32.v v24, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, ma
+; CHECK-NEXT:    vsha2cl.vv v8, v16, v24
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vsha2cl.nxv16i32.nxv16i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    <vscale x 16 x i32> %2,
+    iXLen %3,
+    iXLen 2)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vsha2cl.nxv4i64.nxv4i64(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  iXLen,
+  iXLen)
+
+define <vscale x 4 x i64> @intrinsic_vsha2cl_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsha2cl_vv_nxv4i64_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
+; CHECK-NEXT:    vsha2cl.vv v8, v12, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i64> @llvm.riscv.vsha2cl.nxv4i64.nxv4i64(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64> %1,
+    <vscale x 4 x i64> %2,
+    iXLen %3,
+    iXLen 2)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vsha2cl.nxv8i64.nxv8i64(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>,
+  iXLen,
+  iXLen)
+
+define <vscale x 8 x i64> @intrinsic_vsha2cl_vv_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsha2cl_vv_nxv8i64_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vl8re64.v v24, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, ma
+; CHECK-NEXT:    vsha2cl.vv v8, v16, v24
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vsha2cl.nxv8i64.nxv8i64(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i64> %1,
+    <vscale x 8 x i64> %2,
+    iXLen %3,
+    iXLen 2)
+
+  ret <vscale x 8 x i64> %a
+}

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vsha2ms.ll b/llvm/test/CodeGen/RISCV/rvv/vsha2ms.ll
new file mode 100644
index 00000000000000..d26c9f5a373862
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vsha2ms.ll
@@ -0,0 +1,127 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvknha,+experimental-zvknhb \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvknha,+experimental-zvknhb \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+
+declare <vscale x 4 x i32> @llvm.riscv.vsha2ms.nxv4i32.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  iXLen,
+  iXLen)
+
+define <vscale x 4 x i32> @intrinsic_vsha2ms_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsha2ms_vv_nxv4i32_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    vsha2ms.vv v8, v10, v12
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vsha2ms.nxv4i32.nxv4i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    <vscale x 4 x i32> %2,
+    iXLen %3,
+    iXLen 2)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vsha2ms.nxv8i32.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  iXLen,
+  iXLen)
+
+define <vscale x 8 x i32> @intrinsic_vsha2ms_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsha2ms_vv_nxv8i32_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    vsha2ms.vv v8, v12, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vsha2ms.nxv8i32.nxv8i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    <vscale x 8 x i32> %2,
+    iXLen %3,
+    iXLen 2)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vsha2ms.nxv16i32.nxv16i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  iXLen,
+  iXLen)
+
+define <vscale x 16 x i32> @intrinsic_vsha2ms_vv_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsha2ms_vv_nxv16i32_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vl8re32.v v24, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, ma
+; CHECK-NEXT:    vsha2ms.vv v8, v16, v24
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vsha2ms.nxv16i32.nxv16i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    <vscale x 16 x i32> %2,
+    iXLen %3,
+    iXLen 2)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vsha2ms.nxv4i64.nxv4i64(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  iXLen,
+  iXLen)
+
+define <vscale x 4 x i64> @intrinsic_vsha2ms_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsha2ms_vv_nxv4i64_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
+; CHECK-NEXT:    vsha2ms.vv v8, v12, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i64> @llvm.riscv.vsha2ms.nxv4i64.nxv4i64(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64> %1,
+    <vscale x 4 x i64> %2,
+    iXLen %3,
+    iXLen 2)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vsha2ms.nxv8i64.nxv8i64(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>,
+  iXLen,
+  iXLen)
+
+define <vscale x 8 x i64> @intrinsic_vsha2ms_vv_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsha2ms_vv_nxv8i64_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vl8re64.v v24, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, ma
+; CHECK-NEXT:    vsha2ms.vv v8, v16, v24
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vsha2ms.nxv8i64.nxv8i64(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i64> %1,
+    <vscale x 8 x i64> %2,
+    iXLen %3,
+    iXLen 2)
+
+  ret <vscale x 8 x i64> %a
+}

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vsm3c.ll b/llvm/test/CodeGen/RISCV/rvv/vsm3c.ll
new file mode 100644
index 00000000000000..16d29028c1e30f
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vsm3c.ll
@@ -0,0 +1,53 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvksh \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvksh \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+
+declare <vscale x 8 x i32> @llvm.riscv.vsm3c.nxv8i32.i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  iXLen,
+  iXLen,
+  iXLen)
+
+define <vscale x 8 x i32> @intrinsic_vsm3c_vi_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vsm3c_vi_nxv8i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    vsm3c.vi v8, v12, 2
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vsm3c.nxv8i32.i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    iXLen 2,
+    iXLen %2,
+    iXLen 2)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vsm3c.nxv16i32.i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  iXLen,
+  iXLen,
+  iXLen)
+
+define <vscale x 16 x i32> @intrinsic_vsm3c_vi_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vsm3c_vi_nxv16i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT:    vsm3c.vi v8, v16, 2
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vsm3c.nxv16i32.i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    iXLen 2,
+    iXLen %2,
+    iXLen 2)
+
+  ret <vscale x 16 x i32> %a
+}

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vsm3me.ll b/llvm/test/CodeGen/RISCV/rvv/vsm3me.ll
new file mode 100644
index 00000000000000..ed7f21312c5e22
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vsm3me.ll
@@ -0,0 +1,49 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvksh \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvksh \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+
+declare <vscale x 8 x i32> @llvm.riscv.vsm3me.nxv8i32.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  iXLen)
+
+define <vscale x 8 x i32> @intrinsic_vsm3me_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vsm3me_vv_nxv8i32_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    vsm3me.vv v8, v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vsm3me.nxv8i32.nxv8i32(
+    <vscale x 8 x i32> undef,
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    iXLen %2)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vsm3me.nxv16i32.nxv16i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  iXLen)
+
+define <vscale x 16 x i32> @intrinsic_vsm3me_vv_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vsm3me_vv_nxv16i32_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT:    vsm3me.vv v8, v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vsm3me.nxv16i32.nxv16i32(
+    <vscale x 16 x i32> undef,
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    iXLen %2)
+
+  ret <vscale x 16 x i32> %a
+}

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vsm4k.ll b/llvm/test/CodeGen/RISCV/rvv/vsm4k.ll
new file mode 100644
index 00000000000000..fe2bb9a77bcc4a
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vsm4k.ll
@@ -0,0 +1,71 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvksed \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvksed \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+
+declare <vscale x 4 x i32> @llvm.riscv.vsm4k.nxv4i32.i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  iXLen,
+  iXLen)
+
+define <vscale x 4 x i32> @intrinsic_vsm4k_vi_nxv4i32_i32(<vscale x 4 x i32> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vsm4k_vi_nxv4i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    vsm4k.vi v8, v8, 2
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vsm4k.nxv4i32.i32(
+    <vscale x 4 x i32> undef,
+    <vscale x 4 x i32> %0,
+    iXLen 2,
+    iXLen %1)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vsm4k.nxv8i32.i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  iXLen,
+  iXLen)
+
+define <vscale x 8 x i32> @intrinsic_vsm4k_vi_nxv8i32_i32(<vscale x 8 x i32> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vsm4k_vi_nxv8i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    vsm4k.vi v8, v8, 2
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vsm4k.nxv8i32.i32(
+    <vscale x 8 x i32> undef,
+    <vscale x 8 x i32> %0,
+    iXLen 2,
+    iXLen %1)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vsm4k.nxv16i32.i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  iXLen,
+  iXLen)
+
+define <vscale x 16 x i32> @intrinsic_vsm4k_vi_nxv16i32_i32(<vscale x 16 x i32> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vsm4k_vi_nxv16i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT:    vsm4k.vi v8, v8, 2
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vsm4k.nxv16i32.i32(
+    <vscale x 16 x i32> undef,
+    <vscale x 16 x i32> %0,
+    iXLen 2,
+    iXLen %1)
+
+  ret <vscale x 16 x i32> %a
+}

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vsm4r.ll b/llvm/test/CodeGen/RISCV/rvv/vsm4r.ll
new file mode 100644
index 00000000000000..f9da81eb9575a1
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vsm4r.ll
@@ -0,0 +1,126 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvksed \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvksed \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+
+declare <vscale x 4 x i32> @llvm.riscv.vsm4r.vv.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  iXLen, iXLen);
+
+define <vscale x 4 x i32> @intrinsic_vsm4r_vv_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vsm4r_vv_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    vsm4r.vv v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vsm4r.vv.nxv4i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    iXLen %2, iXLen 2)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vsm4r.vv.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  iXLen, iXLen);
+
+define <vscale x 8 x i32> @intrinsic_vsm4r_vv_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vsm4r_vv_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    vsm4r.vv v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vsm4r.vv.nxv8i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    iXLen %2, iXLen 2)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vsm4r.vv.nxv16i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  iXLen, iXLen);
+
+define <vscale x 16 x i32> @intrinsic_vsm4r_vv_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vsm4r_vv_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT:    vsm4r.vv v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vsm4r.vv.nxv16i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    iXLen %2, iXLen 2)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vsm4r.vs.nxv4i32.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  iXLen, iXLen);
+
+define <vscale x 4 x i32> @intrinsic_vsm4r_vs_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vsm4r_vs_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    vsm4r.vs v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vsm4r.vs.nxv4i32.nxv4i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    iXLen %2, iXLen 2)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vsm4r.vs.nxv8i32.nxv4i32(
+  <vscale x 8 x i32>,
+  <vscale x 4 x i32>,
+  iXLen, iXLen);
+
+define <vscale x 8 x i32> @intrinsic_vsm4r_vs_nxv8i32(<vscale x 8 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vsm4r_vs_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    vsm4r.vs v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vsm4r.vs.nxv8i32.nxv4i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 4 x i32> %1,
+    iXLen %2, iXLen 2)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vsm4r.vs.nxv16i32.nxv4i32(
+  <vscale x 16 x i32>,
+  <vscale x 4 x i32>,
+  iXLen, iXLen);
+
+define <vscale x 16 x i32> @intrinsic_vsm4r_vs_nxv16i32(<vscale x 16 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vsm4r_vs_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT:    vsm4r.vs v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vsm4r.vs.nxv16i32.nxv4i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 4 x i32> %1,
+    iXLen %2, iXLen 2)
+
+  ret <vscale x 16 x i32> %a
+}
+

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vwsll.ll b/llvm/test/CodeGen/RISCV/rvv/vwsll.ll
new file mode 100644
index 00000000000000..9909fc62d0c80c
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vwsll.ll
@@ -0,0 +1,1955 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvbb \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvbb \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+
+declare <vscale x 1 x i16> @llvm.riscv.vwsll.nxv1i16.nxv1i8.nxv1i8(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  iXLen);
+
+define <vscale x 1 x i16> @intrinsic_vwsll_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vwsll_vv_nxv1i16_nxv1i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT:    vwsll.vv v10, v8, v9
+; CHECK-NEXT:    vmv1r.v v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.vwsll.nxv1i16.nxv1i8.nxv1i8(
+    <vscale x 1 x i16> undef,
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    iXLen %2)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vwsll.mask.nxv1i16.nxv1i8.nxv1i8(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 1 x i16> @intrinsic_vwsll_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vwsll_mask_vv_nxv1i16_nxv1i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT:    vwsll.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.vwsll.mask.nxv1i16.nxv1i8.nxv1i8(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i8> %1,
+    <vscale x 1 x i8> %2,
+    <vscale x 1 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vwsll.nxv2i16.nxv2i8.nxv2i8(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  iXLen);
+
+define <vscale x 2 x i16> @intrinsic_vwsll_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vwsll_vv_nxv2i16_nxv2i8_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT:    vwsll.vv v10, v8, v9
+; CHECK-NEXT:    vmv1r.v v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i16> @llvm.riscv.vwsll.nxv2i16.nxv2i8.nxv2i8(
+    <vscale x 2 x i16> undef,
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    iXLen %2)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vwsll.mask.nxv2i16.nxv2i8.nxv2i8(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 2 x i16> @intrinsic_vwsll_mask_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vwsll_mask_vv_nxv2i16_nxv2i8_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT:    vwsll.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i16> @llvm.riscv.vwsll.mask.nxv2i16.nxv2i8.nxv2i8(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i8> %1,
+    <vscale x 2 x i8> %2,
+    <vscale x 2 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vwsll.nxv4i16.nxv4i8.nxv4i8(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  iXLen);
+
+define <vscale x 4 x i16> @intrinsic_vwsll_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vwsll_vv_nxv4i16_nxv4i8_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT:    vwsll.vv v10, v8, v9
+; CHECK-NEXT:    vmv1r.v v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i16> @llvm.riscv.vwsll.nxv4i16.nxv4i8.nxv4i8(
+    <vscale x 4 x i16> undef,
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    iXLen %2)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vwsll.mask.nxv4i16.nxv4i8.nxv4i8(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 4 x i16> @intrinsic_vwsll_mask_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vwsll_mask_vv_nxv4i16_nxv4i8_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT:    vwsll.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i16> @llvm.riscv.vwsll.mask.nxv4i16.nxv4i8.nxv4i8(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i8> %1,
+    <vscale x 4 x i8> %2,
+    <vscale x 4 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vwsll.nxv8i16.nxv8i8.nxv8i8(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  iXLen);
+
+define <vscale x 8 x i16> @intrinsic_vwsll_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vwsll_vv_nxv8i16_nxv8i8_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT:    vwsll.vv v10, v8, v9
+; CHECK-NEXT:    vmv2r.v v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i16> @llvm.riscv.vwsll.nxv8i16.nxv8i8.nxv8i8(
+    <vscale x 8 x i16> undef,
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    iXLen %2)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vwsll.mask.nxv8i16.nxv8i8.nxv8i8(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 8 x i16> @intrinsic_vwsll_mask_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vwsll_mask_vv_nxv8i16_nxv8i8_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT:    vwsll.vv v8, v10, v11, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i16> @llvm.riscv.vwsll.mask.nxv8i16.nxv8i8.nxv8i8(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i8> %1,
+    <vscale x 8 x i8> %2,
+    <vscale x 8 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vwsll.nxv16i16.nxv16i8.nxv16i8(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  iXLen);
+
+define <vscale x 16 x i16> @intrinsic_vwsll_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vwsll_vv_nxv16i16_nxv16i8_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT:    vwsll.vv v12, v8, v10
+; CHECK-NEXT:    vmv4r.v v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i16> @llvm.riscv.vwsll.nxv16i16.nxv16i8.nxv16i8(
+    <vscale x 16 x i16> undef,
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    iXLen %2)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vwsll.mask.nxv16i16.nxv16i8.nxv16i8(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  <vscale x 16 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 16 x i16> @intrinsic_vwsll_mask_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vwsll_mask_vv_nxv16i16_nxv16i8_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT:    vwsll.vv v8, v12, v14, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i16> @llvm.riscv.vwsll.mask.nxv16i16.nxv16i8.nxv16i8(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i8> %1,
+    <vscale x 16 x i8> %2,
+    <vscale x 16 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vwsll.nxv32i16.nxv32i8.nxv32i8(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  iXLen);
+
+define <vscale x 32 x i16> @intrinsic_vwsll_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vwsll_vv_nxv32i16_nxv32i8_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT:    vwsll.vv v16, v8, v12
+; CHECK-NEXT:    vmv8r.v v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i16> @llvm.riscv.vwsll.nxv32i16.nxv32i8.nxv32i8(
+    <vscale x 32 x i16> undef,
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8> %1,
+    iXLen %2)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vwsll.mask.nxv32i16.nxv32i8.nxv32i8(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  <vscale x 32 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 32 x i16> @intrinsic_vwsll_mask_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vwsll_mask_vv_nxv32i16_nxv32i8_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT:    vwsll.vv v8, v16, v20, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i16> @llvm.riscv.vwsll.mask.nxv32i16.nxv32i8.nxv32i8(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i8> %1,
+    <vscale x 32 x i8> %2,
+    <vscale x 32 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vwsll.nxv1i32.nxv1i16.nxv1i16(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  iXLen);
+
+define <vscale x 1 x i32> @intrinsic_vwsll_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vwsll_vv_nxv1i32_nxv1i16_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    vwsll.vv v10, v8, v9
+; CHECK-NEXT:    vmv1r.v v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vwsll.nxv1i32.nxv1i16.nxv1i16(
+    <vscale x 1 x i32> undef,
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    iXLen %2)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vwsll.mask.nxv1i32.nxv1i16.nxv1i16(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 1 x i32> @intrinsic_vwsll_mask_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vwsll_mask_vv_nxv1i32_nxv1i16_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT:    vwsll.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vwsll.mask.nxv1i32.nxv1i16.nxv1i16(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i16> %1,
+    <vscale x 1 x i16> %2,
+    <vscale x 1 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vwsll.nxv2i32.nxv2i16.nxv2i16(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  iXLen);
+
+define <vscale x 2 x i32> @intrinsic_vwsll_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vwsll_vv_nxv2i32_nxv2i16_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    vwsll.vv v10, v8, v9
+; CHECK-NEXT:    vmv1r.v v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vwsll.nxv2i32.nxv2i16.nxv2i16(
+    <vscale x 2 x i32> undef,
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %1,
+    iXLen %2)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vwsll.mask.nxv2i32.nxv2i16.nxv2i16(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  <vscale x 2 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 2 x i32> @intrinsic_vwsll_mask_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vwsll_mask_vv_nxv2i32_nxv2i16_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT:    vwsll.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vwsll.mask.nxv2i32.nxv2i16.nxv2i16(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i16> %1,
+    <vscale x 2 x i16> %2,
+    <vscale x 2 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vwsll.nxv4i32.nxv4i16.nxv4i16(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  iXLen);
+
+define <vscale x 4 x i32> @intrinsic_vwsll_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vwsll_vv_nxv4i32_nxv4i16_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    vwsll.vv v10, v8, v9
+; CHECK-NEXT:    vmv2r.v v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vwsll.nxv4i32.nxv4i16.nxv4i16(
+    <vscale x 4 x i32> undef,
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %1,
+    iXLen %2)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vwsll.mask.nxv4i32.nxv4i16.nxv4i16(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  <vscale x 4 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 4 x i32> @intrinsic_vwsll_mask_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vwsll_mask_vv_nxv4i32_nxv4i16_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT:    vwsll.vv v8, v10, v11, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vwsll.mask.nxv4i32.nxv4i16.nxv4i16(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i16> %1,
+    <vscale x 4 x i16> %2,
+    <vscale x 4 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vwsll.nxv8i32.nxv8i16.nxv8i16(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  iXLen);
+
+define <vscale x 8 x i32> @intrinsic_vwsll_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vwsll_vv_nxv8i32_nxv8i16_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    vwsll.vv v12, v8, v10
+; CHECK-NEXT:    vmv4r.v v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vwsll.nxv8i32.nxv8i16.nxv8i16(
+    <vscale x 8 x i32> undef,
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %1,
+    iXLen %2)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vwsll.mask.nxv8i32.nxv8i16.nxv8i16(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  <vscale x 8 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 8 x i32> @intrinsic_vwsll_mask_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vwsll_mask_vv_nxv8i32_nxv8i16_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT:    vwsll.vv v8, v12, v14, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vwsll.mask.nxv8i32.nxv8i16.nxv8i16(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i16> %1,
+    <vscale x 8 x i16> %2,
+    <vscale x 8 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vwsll.nxv16i32.nxv16i16.nxv16i16(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  iXLen);
+
+define <vscale x 16 x i32> @intrinsic_vwsll_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vwsll_vv_nxv16i32_nxv16i16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    vwsll.vv v16, v8, v12
+; CHECK-NEXT:    vmv8r.v v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vwsll.nxv16i32.nxv16i16.nxv16i16(
+    <vscale x 16 x i32> undef,
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %1,
+    iXLen %2)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vwsll.mask.nxv16i32.nxv16i16.nxv16i16(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  <vscale x 16 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 16 x i32> @intrinsic_vwsll_mask_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vwsll_mask_vv_nxv16i32_nxv16i16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT:    vwsll.vv v8, v16, v20, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vwsll.mask.nxv16i32.nxv16i16.nxv16i16(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i16> %1,
+    <vscale x 16 x i16> %2,
+    <vscale x 16 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vwsll.nxv1i64.nxv1i32.nxv1i32(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vwsll_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vwsll_vv_nxv1i64_nxv1i32_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    vwsll.vv v10, v8, v9
+; CHECK-NEXT:    vmv1r.v v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vwsll.nxv1i64.nxv1i32.nxv1i32(
+    <vscale x 1 x i64> undef,
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    iXLen %2)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vwsll.mask.nxv1i64.nxv1i32.nxv1i32(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vwsll_mask_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vwsll_mask_vv_nxv1i64_nxv1i32_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT:    vwsll.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vwsll.mask.nxv1i64.nxv1i32.nxv1i32(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i32> %1,
+    <vscale x 1 x i32> %2,
+    <vscale x 1 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vwsll.nxv2i64.nxv2i32.nxv2i32(
+  <vscale x 2 x i64>,
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  iXLen);
+
+define <vscale x 2 x i64> @intrinsic_vwsll_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vwsll_vv_nxv2i64_nxv2i32_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    vwsll.vv v10, v8, v9
+; CHECK-NEXT:    vmv2r.v v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i64> @llvm.riscv.vwsll.nxv2i64.nxv2i32.nxv2i32(
+    <vscale x 2 x i64> undef,
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    iXLen %2)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vwsll.mask.nxv2i64.nxv2i32.nxv2i32(
+  <vscale x 2 x i64>,
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  <vscale x 2 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 2 x i64> @intrinsic_vwsll_mask_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vwsll_mask_vv_nxv2i64_nxv2i32_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT:    vwsll.vv v8, v10, v11, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i64> @llvm.riscv.vwsll.mask.nxv2i64.nxv2i32.nxv2i32(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i32> %1,
+    <vscale x 2 x i32> %2,
+    <vscale x 2 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vwsll.nxv4i64.nxv4i32.nxv4i32(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vwsll_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vwsll_vv_nxv4i64_nxv4i32_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    vwsll.vv v12, v8, v10
+; CHECK-NEXT:    vmv4r.v v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i64> @llvm.riscv.vwsll.nxv4i64.nxv4i32.nxv4i32(
+    <vscale x 4 x i64> undef,
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    iXLen %2)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vwsll.mask.nxv4i64.nxv4i32.nxv4i32(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  <vscale x 4 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vwsll_mask_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vwsll_mask_vv_nxv4i64_nxv4i32_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT:    vwsll.vv v8, v12, v14, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i64> @llvm.riscv.vwsll.mask.nxv4i64.nxv4i32.nxv4i32(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i32> %1,
+    <vscale x 4 x i32> %2,
+    <vscale x 4 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vwsll.nxv8i64.nxv8i32.nxv8i32(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  iXLen);
+
+define <vscale x 8 x i64> @intrinsic_vwsll_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vwsll_vv_nxv8i64_nxv8i32_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    vwsll.vv v16, v8, v12
+; CHECK-NEXT:    vmv8r.v v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vwsll.nxv8i64.nxv8i32.nxv8i32(
+    <vscale x 8 x i64> undef,
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    iXLen %2)
+
+  ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vwsll.mask.nxv8i64.nxv8i32.nxv8i32(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  <vscale x 8 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 8 x i64> @intrinsic_vwsll_mask_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vwsll_mask_vv_nxv8i64_nxv8i32_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT:    vwsll.vv v8, v16, v20, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vwsll.mask.nxv8i64.nxv8i32.nxv8i32(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i32> %1,
+    <vscale x 8 x i32> %2,
+    <vscale x 8 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vwsll.nxv1i16.nxv1i8(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i8>,
+  iXLen,
+  iXLen);
+
+define <vscale x 1 x i16> @intrinsic_vwsll_vx_nxv1i16_nxv1i8(<vscale x 1 x i8> %0, iXLen %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vwsll_vx_nxv1i16_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT:    vwsll.vx v9, v8, a0
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.vwsll.nxv1i16.nxv1i8(
+    <vscale x 1 x i16> undef,
+    <vscale x 1 x i8> %0,
+    iXLen %1,
+    iXLen %2)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vwsll.mask.nxv1i16.nxv1i8.i8(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i8>,
+  iXLen,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 1 x i16> @intrinsic_vwsll_mask_vx_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vwsll_mask_vx_nxv1i16_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT:    vwsll.vx v8, v9, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.vwsll.mask.nxv1i16.nxv1i8.i8(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i8> %1,
+    iXLen %2,
+    <vscale x 1 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vwsll.nxv2i16.nxv2i8(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i8>,
+  iXLen,
+  iXLen);
+
+define <vscale x 2 x i16> @intrinsic_vwsll_vx_nxv2i16_nxv2i8(<vscale x 2 x i8> %0, iXLen %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vwsll_vx_nxv2i16_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT:    vwsll.vx v9, v8, a0
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i16> @llvm.riscv.vwsll.nxv2i16.nxv2i8(
+    <vscale x 2 x i16> undef,
+    <vscale x 2 x i8> %0,
+    iXLen %1,
+    iXLen %2)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vwsll.mask.nxv2i16.nxv2i8.i8(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i8>,
+  iXLen,
+  <vscale x 2 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 2 x i16> @intrinsic_vwsll_mask_vx_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vwsll_mask_vx_nxv2i16_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT:    vwsll.vx v8, v9, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i16> @llvm.riscv.vwsll.mask.nxv2i16.nxv2i8.i8(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i8> %1,
+    iXLen %2,
+    <vscale x 2 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vwsll.nxv4i16.nxv4i8(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i8>,
+  iXLen,
+  iXLen);
+
+define <vscale x 4 x i16> @intrinsic_vwsll_vx_nxv4i16_nxv4i8(<vscale x 4 x i8> %0, iXLen %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vwsll_vx_nxv4i16_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT:    vwsll.vx v9, v8, a0
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i16> @llvm.riscv.vwsll.nxv4i16.nxv4i8(
+    <vscale x 4 x i16> undef,
+    <vscale x 4 x i8> %0,
+    iXLen %1,
+    iXLen %2)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vwsll.mask.nxv4i16.nxv4i8.i8(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i8>,
+  iXLen,
+  <vscale x 4 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 4 x i16> @intrinsic_vwsll_mask_vx_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vwsll_mask_vx_nxv4i16_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT:    vwsll.vx v8, v9, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i16> @llvm.riscv.vwsll.mask.nxv4i16.nxv4i8.i8(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i8> %1,
+    iXLen %2,
+    <vscale x 4 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vwsll.nxv8i16.nxv8i8(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i8>,
+  iXLen,
+  iXLen);
+
+define <vscale x 8 x i16> @intrinsic_vwsll_vx_nxv8i16_nxv8i8(<vscale x 8 x i8> %0, iXLen %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vwsll_vx_nxv8i16_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT:    vwsll.vx v10, v8, a0
+; CHECK-NEXT:    vmv2r.v v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i16> @llvm.riscv.vwsll.nxv8i16.nxv8i8(
+    <vscale x 8 x i16> undef,
+    <vscale x 8 x i8> %0,
+    iXLen %1,
+    iXLen %2)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vwsll.mask.nxv8i16.nxv8i8.i8(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i8>,
+  iXLen,
+  <vscale x 8 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 8 x i16> @intrinsic_vwsll_mask_vx_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vwsll_mask_vx_nxv8i16_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT:    vwsll.vx v8, v10, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i16> @llvm.riscv.vwsll.mask.nxv8i16.nxv8i8.i8(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i8> %1,
+    iXLen%2,
+    <vscale x 8 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vwsll.nxv16i16.nxv16i8(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i8>,
+  iXLen,
+  iXLen);
+
+define <vscale x 16 x i16> @intrinsic_vwsll_vx_nxv16i16_nxv16i8(<vscale x 16 x i8> %0, iXLen%1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vwsll_vx_nxv16i16_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT:    vwsll.vx v12, v8, a0
+; CHECK-NEXT:    vmv4r.v v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i16> @llvm.riscv.vwsll.nxv16i16.nxv16i8(
+    <vscale x 16 x i16> undef,
+    <vscale x 16 x i8> %0,
+    iXLen%1,
+    iXLen %2)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vwsll.mask.nxv16i16.nxv16i8.i8(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i8>,
+  iXLen,
+  <vscale x 16 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 16 x i16> @intrinsic_vwsll_mask_vx_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, iXLen%2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vwsll_mask_vx_nxv16i16_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
+; CHECK-NEXT:    vwsll.vx v8, v12, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i16> @llvm.riscv.vwsll.mask.nxv16i16.nxv16i8.i8(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i8> %1,
+    iXLen%2,
+    <vscale x 16 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vwsll.nxv32i16.nxv32i8(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i8>,
+  iXLen,
+  iXLen);
+
+define <vscale x 32 x i16> @intrinsic_vwsll_vx_nxv32i16_nxv32i8(<vscale x 32 x i8> %0, iXLen%1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vwsll_vx_nxv32i16_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
+; CHECK-NEXT:    vwsll.vx v16, v8, a0
+; CHECK-NEXT:    vmv8r.v v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i16> @llvm.riscv.vwsll.nxv32i16.nxv32i8(
+    <vscale x 32 x i16> undef,
+    <vscale x 32 x i8> %0,
+    iXLen%1,
+    iXLen %2)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vwsll.mask.nxv32i16.nxv32i8.i8(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i8>,
+  iXLen,
+  <vscale x 32 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 32 x i16> @intrinsic_vwsll_mask_vx_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, iXLen%2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vwsll_mask_vx_nxv32i16_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
+; CHECK-NEXT:    vwsll.vx v8, v16, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i16> @llvm.riscv.vwsll.mask.nxv32i16.nxv32i8.i8(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i8> %1,
+    iXLen%2,
+    <vscale x 32 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vwsll.nxv1i32.nxv1i16(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i16>,
+  iXLen,
+  iXLen);
+
+define <vscale x 1 x i32> @intrinsic_vwsll_vx_nxv1i32_nxv1i16(<vscale x 1 x i16> %0, iXLen%1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vwsll_vx_nxv1i32_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT:    vwsll.vx v9, v8, a0
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vwsll.nxv1i32.nxv1i16(
+    <vscale x 1 x i32> undef,
+    <vscale x 1 x i16> %0,
+    iXLen%1,
+    iXLen %2)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vwsll.mask.nxv1i32.nxv1i16.i16(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i16>,
+  iXLen,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 1 x i32> @intrinsic_vwsll_mask_vx_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, iXLen%2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vwsll_mask_vx_nxv1i32_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT:    vwsll.vx v8, v9, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vwsll.mask.nxv1i32.nxv1i16.i16(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i16> %1,
+    iXLen%2,
+    <vscale x 1 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vwsll.nxv2i32.nxv2i16(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i16>,
+  iXLen,
+  iXLen);
+
+define <vscale x 2 x i32> @intrinsic_vwsll_vx_nxv2i32_nxv2i16(<vscale x 2 x i16> %0, iXLen%1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vwsll_vx_nxv2i32_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT:    vwsll.vx v9, v8, a0
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vwsll.nxv2i32.nxv2i16(
+    <vscale x 2 x i32> undef,
+    <vscale x 2 x i16> %0,
+    iXLen%1,
+    iXLen %2)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vwsll.mask.nxv2i32.nxv2i16.i16(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i16>,
+  iXLen,
+  <vscale x 2 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 2 x i32> @intrinsic_vwsll_mask_vx_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, iXLen%2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vwsll_mask_vx_nxv2i32_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
+; CHECK-NEXT:    vwsll.vx v8, v9, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vwsll.mask.nxv2i32.nxv2i16.i16(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i16> %1,
+    iXLen%2,
+    <vscale x 2 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vwsll.nxv4i32.nxv4i16(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i16>,
+  iXLen,
+  iXLen);
+
+define <vscale x 4 x i32> @intrinsic_vwsll_vx_nxv4i32_nxv4i16(<vscale x 4 x i16> %0, iXLen%1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vwsll_vx_nxv4i32_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT:    vwsll.vx v10, v8, a0
+; CHECK-NEXT:    vmv2r.v v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vwsll.nxv4i32.nxv4i16(
+    <vscale x 4 x i32> undef,
+    <vscale x 4 x i16> %0,
+    iXLen%1,
+    iXLen %2)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vwsll.mask.nxv4i32.nxv4i16.i16(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i16>,
+  iXLen,
+  <vscale x 4 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 4 x i32> @intrinsic_vwsll_mask_vx_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, iXLen%2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vwsll_mask_vx_nxv4i32_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT:    vwsll.vx v8, v10, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vwsll.mask.nxv4i32.nxv4i16.i16(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i16> %1,
+    iXLen%2,
+    <vscale x 4 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vwsll.nxv8i32.nxv8i16(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i16>,
+  iXLen,
+  iXLen);
+
+define <vscale x 8 x i32> @intrinsic_vwsll_vx_nxv8i32_nxv8i16(<vscale x 8 x i16> %0, iXLen%1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vwsll_vx_nxv8i32_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT:    vwsll.vx v12, v8, a0
+; CHECK-NEXT:    vmv4r.v v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vwsll.nxv8i32.nxv8i16(
+    <vscale x 8 x i32> undef,
+    <vscale x 8 x i16> %0,
+    iXLen%1,
+    iXLen %2)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vwsll.mask.nxv8i32.nxv8i16.i16(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i16>,
+  iXLen,
+  <vscale x 8 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 8 x i32> @intrinsic_vwsll_mask_vx_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, iXLen%2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vwsll_mask_vx_nxv8i32_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
+; CHECK-NEXT:    vwsll.vx v8, v12, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vwsll.mask.nxv8i32.nxv8i16.i16(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i16> %1,
+    iXLen%2,
+    <vscale x 8 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vwsll.nxv16i32.nxv16i16(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i16>,
+  iXLen,
+  iXLen);
+
+define <vscale x 16 x i32> @intrinsic_vwsll_vx_nxv16i32_nxv16i16(<vscale x 16 x i16> %0, iXLen%1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vwsll_vx_nxv16i32_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT:    vwsll.vx v16, v8, a0
+; CHECK-NEXT:    vmv8r.v v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vwsll.nxv16i32.nxv16i16(
+    <vscale x 16 x i32> undef,
+    <vscale x 16 x i16> %0,
+    iXLen%1,
+    iXLen %2)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vwsll.mask.nxv16i32.nxv16i16.i16(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i16>,
+  iXLen,
+  <vscale x 16 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 16 x i32> @intrinsic_vwsll_mask_vx_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, iXLen%2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vwsll_mask_vx_nxv16i32_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
+; CHECK-NEXT:    vwsll.vx v8, v16, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vwsll.mask.nxv16i32.nxv16i16.i16(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i16> %1,
+    iXLen%2,
+    <vscale x 16 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vwsll.nxv1i64.nxv1i32(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i32>,
+  iXLen,
+  iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vwsll_vx_nxv1i64_nxv1i32(<vscale x 1 x i32> %0, iXLen%1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vwsll_vx_nxv1i64_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT:    vwsll.vx v9, v8, a0
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vwsll.nxv1i64.nxv1i32(
+    <vscale x 1 x i64> undef,
+    <vscale x 1 x i32> %0,
+    iXLen%1,
+    iXLen %2)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vwsll.mask.nxv1i64.nxv1i32.i32(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i32>,
+  iXLen,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vwsll_mask_vx_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, iXLen%2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vwsll_mask_vx_nxv1i64_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
+; CHECK-NEXT:    vwsll.vx v8, v9, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vwsll.mask.nxv1i64.nxv1i32.i32(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i32> %1,
+    iXLen%2,
+    <vscale x 1 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vwsll.nxv2i64.nxv2i32(
+  <vscale x 2 x i64>,
+  <vscale x 2 x i32>,
+  iXLen,
+  iXLen);
+
+define <vscale x 2 x i64> @intrinsic_vwsll_vx_nxv2i64_nxv2i32(<vscale x 2 x i32> %0, iXLen%1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vwsll_vx_nxv2i64_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT:    vwsll.vx v10, v8, a0
+; CHECK-NEXT:    vmv2r.v v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i64> @llvm.riscv.vwsll.nxv2i64.nxv2i32(
+    <vscale x 2 x i64> undef,
+    <vscale x 2 x i32> %0,
+    iXLen%1,
+    iXLen %2)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vwsll.mask.nxv2i64.nxv2i32.i32(
+  <vscale x 2 x i64>,
+  <vscale x 2 x i32>,
+  iXLen,
+  <vscale x 2 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 2 x i64> @intrinsic_vwsll_mask_vx_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, iXLen%2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vwsll_mask_vx_nxv2i64_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT:    vwsll.vx v8, v10, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i64> @llvm.riscv.vwsll.mask.nxv2i64.nxv2i32.i32(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i32> %1,
+    iXLen%2,
+    <vscale x 2 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vwsll.nxv4i64.nxv4i32(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i32>,
+  iXLen,
+  iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vwsll_vx_nxv4i64_nxv4i32(<vscale x 4 x i32> %0, iXLen%1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vwsll_vx_nxv4i64_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT:    vwsll.vx v12, v8, a0
+; CHECK-NEXT:    vmv4r.v v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i64> @llvm.riscv.vwsll.nxv4i64.nxv4i32(
+    <vscale x 4 x i64> undef,
+    <vscale x 4 x i32> %0,
+    iXLen%1,
+    iXLen %2)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vwsll.mask.nxv4i64.nxv4i32.i32(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i32>,
+  iXLen,
+  <vscale x 4 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vwsll_mask_vx_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, iXLen%2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vwsll_mask_vx_nxv4i64_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
+; CHECK-NEXT:    vwsll.vx v8, v12, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i64> @llvm.riscv.vwsll.mask.nxv4i64.nxv4i32.i32(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i32> %1,
+    iXLen%2,
+    <vscale x 4 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vwsll.nxv8i64.nxv8i32(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i32>,
+  iXLen,
+  iXLen);
+
+define <vscale x 8 x i64> @intrinsic_vwsll_vx_nxv8i64_nxv8i32(<vscale x 8 x i32> %0, iXLen%1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vwsll_vx_nxv8i64_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT:    vwsll.vx v16, v8, a0
+; CHECK-NEXT:    vmv8r.v v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vwsll.nxv8i64.nxv8i32(
+    <vscale x 8 x i64> undef,
+    <vscale x 8 x i32> %0,
+    iXLen%1,
+    iXLen %2)
+
+  ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vwsll.mask.nxv8i64.nxv8i32.i32(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i32>,
+  iXLen,
+  <vscale x 8 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 8 x i64> @intrinsic_vwsll_mask_vx_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, iXLen%2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vwsll_mask_vx_nxv8i64_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
+; CHECK-NEXT:    vwsll.vx v8, v16, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vwsll.mask.nxv8i64.nxv8i32.i32(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i32> %1,
+    iXLen%2,
+    <vscale x 8 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 8 x i64> %a
+}
+
+define <vscale x 1 x i16> @intrinsic_vwsll_vi_nxv1i16_nxv1i8(<vscale x 1 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vwsll_vi_nxv1i16_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT:    vwsll.vi v9, v8, 1
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.vwsll.nxv1i16.nxv1i8(
+    <vscale x 1 x i16> undef,
+    <vscale x 1 x i8> %0,
+    iXLen 1,
+    iXLen %1)
+
+  ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 1 x i16> @intrinsic_vwsll_mask_vi_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsll_mask_vi_nxv1i16_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT:    vwsll.vi v8, v9, 1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.vwsll.mask.nxv1i16.nxv1i8.i8(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i8> %1,
+    iXLen 1,
+    <vscale x 1 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 2 x i16> @intrinsic_vwsll_vi_nxv2i16_nxv2i8(<vscale x 2 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vwsll_vi_nxv2i16_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT:    vwsll.vi v9, v8, 1
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i16> @llvm.riscv.vwsll.nxv2i16.nxv2i8(
+    <vscale x 2 x i16> undef,
+    <vscale x 2 x i8> %0,
+    iXLen 1,
+    iXLen %1)
+
+  ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 2 x i16> @intrinsic_vwsll_mask_vi_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsll_mask_vi_nxv2i16_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT:    vwsll.vi v8, v9, 1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i16> @llvm.riscv.vwsll.mask.nxv2i16.nxv2i8.i8(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i8> %1,
+    iXLen 1,
+    <vscale x 2 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 4 x i16> @intrinsic_vwsll_vi_nxv4i16_nxv4i8(<vscale x 4 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vwsll_vi_nxv4i16_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT:    vwsll.vi v9, v8, 1
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i16> @llvm.riscv.vwsll.nxv4i16.nxv4i8(
+    <vscale x 4 x i16> undef,
+    <vscale x 4 x i8> %0,
+    iXLen 1,
+    iXLen %1)
+
+  ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 4 x i16> @intrinsic_vwsll_mask_vi_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsll_mask_vi_nxv4i16_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT:    vwsll.vi v8, v9, 1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i16> @llvm.riscv.vwsll.mask.nxv4i16.nxv4i8.i8(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i8> %1,
+    iXLen 1,
+    <vscale x 4 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 8 x i16> @intrinsic_vwsll_vi_nxv8i16_nxv8i8(<vscale x 8 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vwsll_vi_nxv8i16_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT:    vwsll.vi v10, v8, 1
+; CHECK-NEXT:    vmv2r.v v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i16> @llvm.riscv.vwsll.nxv8i16.nxv8i8(
+    <vscale x 8 x i16> undef,
+    <vscale x 8 x i8> %0,
+    iXLen 1,
+    iXLen %1)
+
+  ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 8 x i16> @intrinsic_vwsll_mask_vi_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsll_mask_vi_nxv8i16_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT:    vwsll.vi v8, v10, 1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i16> @llvm.riscv.vwsll.mask.nxv8i16.nxv8i8.i8(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i8> %1,
+    iXLen 1,
+    <vscale x 8 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 16 x i16> @intrinsic_vwsll_vi_nxv16i16_nxv16i8(<vscale x 16 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vwsll_vi_nxv16i16_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT:    vwsll.vi v12, v8, 1
+; CHECK-NEXT:    vmv4r.v v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i16> @llvm.riscv.vwsll.nxv16i16.nxv16i8(
+    <vscale x 16 x i16> undef,
+    <vscale x 16 x i8> %0,
+    iXLen 1,
+    iXLen %1)
+
+  ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 16 x i16> @intrinsic_vwsll_mask_vi_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsll_mask_vi_nxv16i16_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT:    vwsll.vi v8, v12, 1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i16> @llvm.riscv.vwsll.mask.nxv16i16.nxv16i8.i8(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i8> %1,
+    iXLen 1,
+    <vscale x 16 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 32 x i16> @intrinsic_vwsll_vi_nxv32i16_nxv32i8(<vscale x 32 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vwsll_vi_nxv32i16_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT:    vwsll.vi v16, v8, 1
+; CHECK-NEXT:    vmv8r.v v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i16> @llvm.riscv.vwsll.nxv32i16.nxv32i8(
+    <vscale x 32 x i16> undef,
+    <vscale x 32 x i8> %0,
+    iXLen 1,
+    iXLen %1)
+
+  ret <vscale x 32 x i16> %a
+}
+
+define <vscale x 32 x i16> @intrinsic_vwsll_mask_vi_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsll_mask_vi_nxv32i16_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT:    vwsll.vi v8, v16, 1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i16> @llvm.riscv.vwsll.mask.nxv32i16.nxv32i8.i8(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i8> %1,
+    iXLen 1,
+    <vscale x 32 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 32 x i16> %a
+}
+
+define <vscale x 1 x i32> @intrinsic_vwsll_vi_nxv1i32_nxv1i16(<vscale x 1 x i16> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vwsll_vi_nxv1i32_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    vwsll.vi v9, v8, 1
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vwsll.nxv1i32.nxv1i16(
+    <vscale x 1 x i32> undef,
+    <vscale x 1 x i16> %0,
+    iXLen 1,
+    iXLen %1)
+
+  ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 1 x i32> @intrinsic_vwsll_mask_vi_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsll_mask_vi_nxv1i32_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT:    vwsll.vi v8, v9, 1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vwsll.mask.nxv1i32.nxv1i16.i16(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i16> %1,
+    iXLen 1,
+    <vscale x 1 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 2 x i32> @intrinsic_vwsll_vi_nxv2i32_nxv2i16(<vscale x 2 x i16> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vwsll_vi_nxv2i32_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    vwsll.vi v9, v8, 1
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vwsll.nxv2i32.nxv2i16(
+    <vscale x 2 x i32> undef,
+    <vscale x 2 x i16> %0,
+    iXLen 1,
+    iXLen %1)
+
+  ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 2 x i32> @intrinsic_vwsll_mask_vi_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsll_mask_vi_nxv2i32_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT:    vwsll.vi v8, v9, 1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vwsll.mask.nxv2i32.nxv2i16.i16(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i16> %1,
+    iXLen 1,
+    <vscale x 2 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 4 x i32> @intrinsic_vwsll_vi_nxv4i32_nxv4i16(<vscale x 4 x i16> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vwsll_vi_nxv4i32_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    vwsll.vi v10, v8, 1
+; CHECK-NEXT:    vmv2r.v v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vwsll.nxv4i32.nxv4i16(
+    <vscale x 4 x i32> undef,
+    <vscale x 4 x i16> %0,
+    iXLen 1,
+    iXLen %1)
+
+  ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 4 x i32> @intrinsic_vwsll_mask_vi_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsll_mask_vi_nxv4i32_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT:    vwsll.vi v8, v10, 1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vwsll.mask.nxv4i32.nxv4i16.i16(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i16> %1,
+    iXLen 1,
+    <vscale x 4 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 8 x i32> @intrinsic_vwsll_vi_nxv8i32_nxv8i16(<vscale x 8 x i16> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vwsll_vi_nxv8i32_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    vwsll.vi v12, v8, 1
+; CHECK-NEXT:    vmv4r.v v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vwsll.nxv8i32.nxv8i16(
+    <vscale x 8 x i32> undef,
+    <vscale x 8 x i16> %0,
+    iXLen 1,
+    iXLen %1)
+
+  ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 8 x i32> @intrinsic_vwsll_mask_vi_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsll_mask_vi_nxv8i32_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT:    vwsll.vi v8, v12, 1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vwsll.mask.nxv8i32.nxv8i16.i16(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i16> %1,
+    iXLen 1,
+    <vscale x 8 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 16 x i32> @intrinsic_vwsll_vi_nxv16i32_nxv16i16(<vscale x 16 x i16> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vwsll_vi_nxv16i32_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    vwsll.vi v16, v8, 1
+; CHECK-NEXT:    vmv8r.v v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vwsll.nxv16i32.nxv16i16(
+    <vscale x 16 x i32> undef,
+    <vscale x 16 x i16> %0,
+    iXLen 1,
+    iXLen %1)
+
+  ret <vscale x 16 x i32> %a
+}
+
+define <vscale x 16 x i32> @intrinsic_vwsll_mask_vi_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsll_mask_vi_nxv16i32_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT:    vwsll.vi v8, v16, 1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vwsll.mask.nxv16i32.nxv16i16.i16(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i16> %1,
+    iXLen 1,
+    <vscale x 16 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 16 x i32> %a
+}
+
+define <vscale x 1 x i64> @intrinsic_vwsll_vi_nxv1i64_nxv1i32(<vscale x 1 x i32> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vwsll_vi_nxv1i64_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    vwsll.vi v9, v8, 1
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vwsll.nxv1i64.nxv1i32(
+    <vscale x 1 x i64> undef,
+    <vscale x 1 x i32> %0,
+    iXLen 1,
+    iXLen %1)
+
+  ret <vscale x 1 x i64> %a
+}
+
+define <vscale x 1 x i64> @intrinsic_vwsll_mask_vi_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsll_mask_vi_nxv1i64_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT:    vwsll.vi v8, v9, 1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vwsll.mask.nxv1i64.nxv1i32.i32(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i32> %1,
+    iXLen 1,
+    <vscale x 1 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 1 x i64> %a
+}
+
+define <vscale x 2 x i64> @intrinsic_vwsll_vi_nxv2i64_nxv2i32(<vscale x 2 x i32> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vwsll_vi_nxv2i64_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    vwsll.vi v10, v8, 1
+; CHECK-NEXT:    vmv2r.v v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i64> @llvm.riscv.vwsll.nxv2i64.nxv2i32(
+    <vscale x 2 x i64> undef,
+    <vscale x 2 x i32> %0,
+    iXLen 1,
+    iXLen %1)
+
+  ret <vscale x 2 x i64> %a
+}
+
+define <vscale x 2 x i64> @intrinsic_vwsll_mask_vi_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsll_mask_vi_nxv2i64_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT:    vwsll.vi v8, v10, 1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i64> @llvm.riscv.vwsll.mask.nxv2i64.nxv2i32.i32(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i32> %1,
+    iXLen 1,
+    <vscale x 2 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 2 x i64> %a
+}
+
+define <vscale x 4 x i64> @intrinsic_vwsll_vi_nxv4i64_nxv4i32(<vscale x 4 x i32> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vwsll_vi_nxv4i64_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    vwsll.vi v12, v8, 1
+; CHECK-NEXT:    vmv4r.v v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i64> @llvm.riscv.vwsll.nxv4i64.nxv4i32(
+    <vscale x 4 x i64> undef,
+    <vscale x 4 x i32> %0,
+    iXLen 1,
+    iXLen %1)
+
+  ret <vscale x 4 x i64> %a
+}
+
+define <vscale x 4 x i64> @intrinsic_vwsll_mask_vi_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsll_mask_vi_nxv4i64_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT:    vwsll.vi v8, v12, 1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i64> @llvm.riscv.vwsll.mask.nxv4i64.nxv4i32.i32(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i32> %1,
+    iXLen 1,
+    <vscale x 4 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 4 x i64> %a
+}
+
+define <vscale x 8 x i64> @intrinsic_vwsll_vi_nxv8i64_nxv8i32(<vscale x 8 x i32> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vwsll_vi_nxv8i64_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    vwsll.vi v16, v8, 1
+; CHECK-NEXT:    vmv8r.v v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vwsll.nxv8i64.nxv8i32(
+    <vscale x 8 x i64> undef,
+    <vscale x 8 x i32> %0,
+    iXLen 1,
+    iXLen %1)
+
+  ret <vscale x 8 x i64> %a
+}
+
+define <vscale x 8 x i64> @intrinsic_vwsll_mask_vi_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsll_mask_vi_nxv8i64_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT:    vwsll.vi v8, v16, 1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vwsll.mask.nxv8i64.nxv8i32.i32(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i32> %1,
+    iXLen 1,
+    <vscale x 8 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 8 x i64> %a
+}
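
For context on the pattern exercised above: each vwsll intrinsic is tested in
an unmasked form (passthru, source vector, shift amount, vl) and a masked form
that additionally takes a mask and a policy immediate (the trailing
"iXLen 1"). Below is a minimal sketch of what one unmasked call looks like
once the file's iXLen placeholder is resolved to i64 for an RV64
configuration; the function and value names (@vwsll_rv64_sketch, %src,
%shamt, %vl) are illustrative and not part of the commit:

  declare <vscale x 1 x i64> @llvm.riscv.vwsll.nxv1i64.nxv1i32(
    <vscale x 1 x i64>, <vscale x 1 x i32>, i64, i64)

  define <vscale x 1 x i64> @vwsll_rv64_sketch(<vscale x 1 x i32> %src, i64 %shamt, i64 %vl) {
    ; Widening shift-left logical: each 32-bit source element is zero-extended
    ; to 64 bits and shifted left by %shamt, for the first %vl elements; the
    ; undef passthru mirrors the tail-agnostic, unmasked tests above.
    %r = call <vscale x 1 x i64> @llvm.riscv.vwsll.nxv1i64.nxv1i32(
        <vscale x 1 x i64> undef,
        <vscale x 1 x i32> %src,
        i64 %shamt,
        i64 %vl)
    ret <vscale x 1 x i64> %r
  }

With the Zvbb vector bit-manipulation feature enabled, llc is expected to
select this to a vwsll.vx under an e32, mf2 vsetvli (plus a whole-register
move into the return register), matching the CHECK lines for
intrinsic_vwsll_vx_nxv1i64_nxv1i32 above.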
