[llvm] e673d40 - [RISCV] Define vmsbf.m/vmsif.m/vmsof.m/viota.m/vid.v intrinsics.

Zakk Chen via llvm-commits <llvm-commits at lists.llvm.org>
Mon Dec 28 06:16:28 PST 2020


Author: Zakk Chen
Date: 2020-12-28T05:54:18-08:00
New Revision: e673d40199477f48b78ed9ad790ce7356474f907

URL: https://github.com/llvm/llvm-project/commit/e673d40199477f48b78ed9ad790ce7356474f907
DIFF: https://github.com/llvm/llvm-project/commit/e673d40199477f48b78ed9ad790ce7356474f907.diff

LOG: [RISCV] Define vmsbf.m/vmsif.m/vmsof.m/viota.m/vid.v intrinsics.

Define these intrinsics and lower them to V instructions.

Use update_llc_test_checks.py for the viota.m tests to check that
earlyclobber is applied correctly.
The masked viota.m tests pass the same argument as both the input and
the mask to avoid a dependency on D93364.
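
For reference, the nxv1i8 viota.m intrinsic declarations exercised by
those tests are shown below (copied from the added rv32 tests; the
other element widths and LMULs follow the same pattern):

  ; Unmasked form: (mask vector input, vl)
  declare <vscale x 1 x i8> @llvm.riscv.viota.nxv1i8(<vscale x 1 x i1>, i32)
  ; Masked form: (maskedoff, mask vector input, mask, vl); the tests pass
  ; the same i1 argument as both the input and the mask.
  declare <vscale x 1 x i8> @llvm.riscv.viota.mask.nxv1i8(
    <vscale x 1 x i8>, <vscale x 1 x i1>, <vscale x 1 x i1>, i32)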

This patch was developed in collaboration with @rogfer01 from BSC.

Reviewed By: HsiangKai

Differential Revision: https://reviews.llvm.org/D93823

Added: 
    llvm/test/CodeGen/RISCV/rvv/vid-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vid-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/viota-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/viota-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmsbf-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmsbf-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmsif-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmsif-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmsof-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmsof-rv64.ll

Modified: 
    llvm/include/llvm/IR/IntrinsicsRISCV.td
    llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td

Removed: 
    


################################################################################
diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
index 4660de58d25b..be11b518416c 100644
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -389,6 +389,21 @@ let TargetPrefix = "riscv" in {
         : Intrinsic<[llvm_anyint_ty],
                     [llvm_anyvector_ty, LLVMMatchType<1>, LLVMMatchType<0>],
                     [IntrNoMem]>, RISCVVIntrinsic;
+  // For mask unary operations with mask type in/out without mask
+  // Output: (mask type output)
+  // Input: (mask type vector_in, vl)
+  class RISCVMaskUnaryMOutNoMask
+        : Intrinsic<[llvm_anyvector_ty],
+                    [LLVMMatchType<0>, llvm_anyint_ty],
+                    [IntrNoMem]>, RISCVVIntrinsic;
+  // For mask unary operations with mask type in/out with mask
+  // Output: (mask type output)
+  // Input: (mask type maskedoff, mask type vector_in, mask, vl)
+  class RISCVMaskUnaryMOutMask
+        : Intrinsic<[llvm_anyint_ty],
+                    [LLVMMatchType<0>, LLVMMatchType<0>,
+                     LLVMMatchType<0>, llvm_anyint_ty],
+                    [IntrNoMem]>, RISCVVIntrinsic;
 
   multiclass RISCVUSLoad {
     def "int_riscv_" # NAME : RISCVUSLoad;
@@ -469,6 +484,10 @@ let TargetPrefix = "riscv" in {
     def "int_riscv_" # NAME : RISCVMaskUnarySOutNoMask;
     def "int_riscv_" # NAME # "_mask" : RISCVMaskUnarySOutMask;
   }
+  multiclass RISCVMaskUnaryMOut {
+    def "int_riscv_" # NAME : RISCVMaskUnaryMOutNoMask;
+    def "int_riscv_" # NAME # "_mask" : RISCVMaskUnaryMOutMask;
+  }
 
   defm vle : RISCVUSLoad;
   defm vleff : RISCVUSLoad;
@@ -685,5 +704,35 @@ let TargetPrefix = "riscv" in {
 
   defm vpopc : RISCVMaskUnarySOut;
   defm vfirst : RISCVMaskUnarySOut;
+  defm vmsbf : RISCVMaskUnaryMOut;
+  defm vmsof : RISCVMaskUnaryMOut;
+  defm vmsif : RISCVMaskUnaryMOut;
+
+  // Output: (vector)
+  // Input: (mask type input, vl)
+  def int_riscv_viota : Intrinsic<[llvm_anyvector_ty],
+                                  [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+                                   llvm_anyint_ty],
+                                  [IntrNoMem]>, RISCVVIntrinsic;
+  // Output: (vector)
+  // Input: (maskedoff, mask type vector_in, mask, vl)
+  def int_riscv_viota_mask : Intrinsic<[llvm_anyvector_ty],
+                                       [LLVMMatchType<0>,
+                                        LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+                                        LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+                                        llvm_anyint_ty],
+                                       [IntrNoMem]>, RISCVVIntrinsic;
+  // Output: (vector)
+  // Input: (vl)
+  def int_riscv_vid : Intrinsic<[llvm_anyvector_ty],
+                                [llvm_anyint_ty],
+                                [IntrNoMem]>, RISCVVIntrinsic;
+  // Output: (vector)
+  // Input: (maskedoff, mask, vl)
+  def int_riscv_vid_mask : Intrinsic<[llvm_anyvector_ty],
+                                     [LLVMMatchType<0>,
+                                      LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+                                      llvm_anyint_ty],
+                                     [IntrNoMem]>, RISCVVIntrinsic;
 
 } // TargetPrefix = "riscv"

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index e064b1e811f7..72e41daa0c3a 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -507,14 +507,47 @@ class VPseudoUnaryNoDummyMask<VReg RetClass,
   let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
 }
 
-class VMaskPseudoUnarySOutNoMask:
-        Pseudo<(outs GPR:$rd),
-               (ins VR:$rs1, GPR:$vl, ixlenimm:$sew), []>,
+class VPseudoNullaryNoMask<VReg RegClass>:
+      Pseudo<(outs RegClass:$rd),
+             (ins GPR:$vl, ixlenimm:$sew),
+             []>, RISCVVPseudo {
+  let mayLoad = 0;
+  let mayStore = 0;
+  let hasSideEffects = 0;
+  let usesCustomInserter = 1;
+  let Uses = [VL, VTYPE];
+  let VLIndex = 1;
+  let SEWIndex = 2;
+  let HasDummyMask = 1;
+  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
+class VPseudoNullaryMask<VReg RegClass>:
+      Pseudo<(outs GetVRegNoV0<RegClass>.R:$rd),
+             (ins GetVRegNoV0<RegClass>.R:$merge, VMaskOp:$vm, GPR:$vl,
+              ixlenimm:$sew), []>, RISCVVPseudo {
+  let mayLoad = 0;
+  let mayStore = 0;
+  let hasSideEffects = 0;
+  let usesCustomInserter = 1;
+  let Constraints ="$rd = $merge";
+  let Uses = [VL, VTYPE];
+  let VLIndex = 3;
+  let SEWIndex = 4;
+  let MergeOpIndex = 1;
+  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
+// RetClass could be GPR or VReg.
+class VPseudoUnaryNoMask<DAGOperand RetClass, VReg OpClass, string Constraint = ""> :
+        Pseudo<(outs RetClass:$rd),
+               (ins OpClass:$rs2, GPR:$vl, ixlenimm:$sew), []>,
         RISCVVPseudo {
   let mayLoad = 0;
   let mayStore = 0;
   let hasSideEffects = 0;
   let usesCustomInserter = 1;
+  let Constraints = Constraint;
   let Uses = [VL, VTYPE];
   let VLIndex = 2;
   let SEWIndex = 3;
@@ -522,7 +555,25 @@ class VMaskPseudoUnarySOutNoMask:
   let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
 }
 
-class VMaskPseudoUnarySOutMask:
+class VPseudoUnaryMask<VReg RetClass, VReg OpClass, string Constraint = ""> :
+        Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
+               (ins GetVRegNoV0<RetClass>.R:$merge, OpClass:$rs2,
+                    VMaskOp:$vm, GPR:$vl, ixlenimm:$sew), []>,
+        RISCVVPseudo {
+  let mayLoad = 0;
+  let mayStore = 0;
+  let hasSideEffects = 0;
+  let usesCustomInserter = 1;
+  let Constraints = Join<[Constraint, "$rd = $merge"], ",">.ret;
+  let Uses = [VL, VTYPE];
+  let VLIndex = 4;
+  let SEWIndex = 5;
+  let MergeOpIndex = 1;
+  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
+// mask unary operation without maskedoff
+class VPseudoMaskUnarySOutMask:
         Pseudo<(outs GPR:$rd),
                (ins VR:$rs1, VMaskOp:$vm, GPR:$vl, ixlenimm:$sew), []>,
         RISCVVPseudo {
@@ -533,7 +584,23 @@ class VMaskPseudoUnarySOutMask:
   let Uses = [VL, VTYPE];
   let VLIndex = 3;
   let SEWIndex = 4;
-  let HasDummyMask = 1;
+  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
+// Masked mask operation have no $rd=$merge constraints
+class VPseudoUnaryMOutMask:
+        Pseudo<(outs VR:$rd),
+               (ins VR:$merge, VR:$rs1, VMaskOp:$vm, GPR:$vl, ixlenimm:$sew), []>,
+        RISCVVPseudo {
+  let mayLoad = 0;
+  let mayStore = 0;
+  let hasSideEffects = 0;
+  let usesCustomInserter = 1;
+  let Constraints = "$rd = $merge";
+  let Uses = [VL, VTYPE];
+  let VLIndex = 4;
+  let SEWIndex = 5;
+  let MergeOpIndex = 1;
   let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
 }
 
@@ -725,12 +792,41 @@ multiclass VPseudoIStore {
   }
 }
 
-multiclass VMaskPseudoUnarySOut {
+multiclass VPseudoUnaryS_M {
   foreach mti = AllMasks in
   {
     let VLMul = mti.LMul.value in {
-      def "_M_" # mti.BX : VMaskPseudoUnarySOutNoMask;
-      def "_M_" # mti.BX # "_MASK" : VMaskPseudoUnarySOutMask;
+      def "_M_" # mti.BX : VPseudoUnaryNoMask<GPR, VR>;
+      def "_M_" # mti.BX # "_MASK" : VPseudoMaskUnarySOutMask;
+    }
+  }
+}
+
+multiclass VPseudoUnaryM_M {
+  foreach mti = AllMasks in
+  {
+    let VLMul = mti.LMul.value in {
+      def "_M_" # mti.BX : VPseudoUnaryNoMask<VR, VR>;
+      def "_M_" # mti.BX # "_MASK" : VPseudoUnaryMask<VR, VR>;
+    }
+  }
+}
+
+multiclass VPseudoMaskNullaryV {
+  foreach m = MxList.m in {
+    let VLMul = m.value in {
+      def "_V_" # m.MX : VPseudoNullaryNoMask<m.vrclass>;
+      def "_V_" # m.MX # "_MASK" : VPseudoNullaryMask<m.vrclass>;
+    }
+  }
+}
+
+multiclass VPseudoUnaryV_M {
+  defvar constraint = "@earlyclobber $rd";
+  foreach m = MxList.m in {
+    let VLMul = m.value in {
+      def "_" # m.MX : VPseudoUnaryNoMask<m.vrclass, VR, constraint>;
+      def "_" # m.MX # "_MASK" : VPseudoUnaryMask<m.vrclass, VR, constraint>;
     }
   }
 }
@@ -1044,6 +1140,65 @@ multiclass VPseudoReductionV_VS {
 //===----------------------------------------------------------------------===//
 // Helpers to define the intrinsic patterns.
 //===----------------------------------------------------------------------===//
+
+class VPatUnaryNoMask<string intrinsic_name,
+                      string inst,
+                      string kind,
+                      ValueType result_type,
+                      ValueType op2_type,
+                      int sew,
+                      LMULInfo vlmul,
+                      VReg op2_reg_class> :
+  Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
+                   (op2_type op2_reg_class:$rs2),
+                   (XLenVT GPR:$vl))),
+                   (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
+                   (op2_type op2_reg_class:$rs2),
+                   (NoX0 GPR:$vl), sew)>;
+
+class VPatUnaryMask<string intrinsic_name,
+                    string inst,
+                    string kind,
+                    ValueType result_type,
+                    ValueType op2_type,
+                    ValueType mask_type,
+                    int sew,
+                    LMULInfo vlmul,
+                    VReg result_reg_class,
+                    VReg op2_reg_class> :
+  Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
+                   (result_type result_reg_class:$merge),
+                   (op2_type op2_reg_class:$rs2),
+                   (mask_type V0),
+                   (XLenVT GPR:$vl))),
+                   (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX#"_MASK")
+                   (result_type result_reg_class:$merge),
+                   (op2_type op2_reg_class:$rs2),
+                   (mask_type V0), (NoX0 GPR:$vl), sew)>;
+
+class VPatMaskUnaryNoMask<string intrinsic_name,
+                          string inst,
+                          MTypeInfo mti> :
+  Pat<(mti.Mask (!cast<Intrinsic>(intrinsic_name)
+                (mti.Mask VR:$rs2),
+                (XLenVT GPR:$vl))),
+                (!cast<Instruction>(inst#"_M_"#mti.BX)
+                (mti.Mask VR:$rs2),
+                (NoX0 GPR:$vl), mti.SEW)>;
+
+class VPatMaskUnaryMask<string intrinsic_name,
+                        string inst,
+                        MTypeInfo mti> :
+  Pat<(mti.Mask (!cast<Intrinsic>(intrinsic_name#"_mask")
+                (mti.Mask VR:$merge),
+                (mti.Mask VR:$rs2),
+                (mti.Mask V0),
+                (XLenVT GPR:$vl))),
+                (!cast<Instruction>(inst#"_M_"#mti.BX#"_MASK")
+                (mti.Mask VR:$merge),
+                (mti.Mask VR:$rs2),
+                (mti.Mask V0), (NoX0 GPR:$vl), mti.SEW)>;
+
 class VPatBinaryNoMask<string intrinsic_name,
                        string inst,
                        string kind,
@@ -1259,7 +1414,7 @@ multiclass VPatIStore<string intrinsic,
               (PseudoMask $rs3, $rs1, $rs2, (mask_type V0), (NoX0 GPR:$vl), sew)>;
 }
 
-multiclass VPatMaskUnarySOut<string intrinsic_name,
+multiclass VPatUnaryS_M<string intrinsic_name,
                              string inst>
 {
   foreach mti = AllMasks in {
@@ -1274,6 +1429,40 @@ multiclass VPatMaskUnarySOut<string intrinsic_name,
   }
 }
 
+multiclass VPatUnaryM_M<string intrinsic,
+                         string inst>
+{
+  foreach mti = AllMasks in {
+    def : VPatMaskUnaryNoMask<intrinsic, inst, mti>;
+    def : VPatMaskUnaryMask<intrinsic, inst, mti>;
+  }
+}
+
+multiclass VPatUnaryV_M<string intrinsic, string instruction>
+{
+  foreach vti = AllIntegerVectors in {
+    def : VPatUnaryNoMask<intrinsic, instruction, "M", vti.Vector, vti.Mask,
+                          vti.SEW, vti.LMul, VR>;
+    def : VPatUnaryMask<intrinsic, instruction, "M", vti.Vector, vti.Mask,
+                        vti.Mask, vti.SEW, vti.LMul, vti.RegClass, VR>;
+  }
+}
+
+multiclass VPatNullaryV<string intrinsic, string instruction>
+{
+  foreach vti = AllIntegerVectors in {
+    def : Pat<(vti.Vector (!cast<Intrinsic>(intrinsic)
+                          (XLenVT GPR:$vl))),
+                          (!cast<Instruction>(instruction#"_V_" # vti.LMul.MX)
+                          (NoX0 GPR:$vl), vti.SEW)>;
+    def : Pat<(vti.Vector (!cast<Intrinsic>(intrinsic # "_mask")
+                          (vti.Vector vti.RegClass:$merge),
+                          (vti.Mask V0), (XLenVT GPR:$vl))),
+                          (!cast<Instruction>(instruction#"_V_" # vti.LMul.MX # "_MASK")
+                          vti.RegClass:$merge, (vti.Mask V0),
+                          (NoX0 GPR:$vl), vti.SEW)>;
+  }
+}
 
 multiclass VPatBinary<string intrinsic,
                       string inst,
@@ -2199,13 +2388,38 @@ defm PseudoVMXNOR: VPseudoBinaryM_MM;
 // 16.2. Vector mask population count vpopc
 //===----------------------------------------------------------------------===//
 
-defm PseudoVPOPC: VMaskPseudoUnarySOut;
+defm PseudoVPOPC: VPseudoUnaryS_M;
 
 //===----------------------------------------------------------------------===//
 // 16.3. vfirst find-first-set mask bit
 //===----------------------------------------------------------------------===//
 
-defm PseudoVFIRST: VMaskPseudoUnarySOut;
+defm PseudoVFIRST: VPseudoUnaryS_M;
+
+//===----------------------------------------------------------------------===//
+// 16.4. vmsbf.m set-before-first mask bit
+//===----------------------------------------------------------------------===//
+defm PseudoVMSBF: VPseudoUnaryM_M;
+
+//===----------------------------------------------------------------------===//
+// 16.5. vmsif.m set-including-first mask bit
+//===----------------------------------------------------------------------===//
+defm PseudoVMSIF: VPseudoUnaryM_M;
+
+//===----------------------------------------------------------------------===//
+// 16.6. vmsof.m set-only-first mask bit
+//===----------------------------------------------------------------------===//
+defm PseudoVMSOF: VPseudoUnaryM_M;
+
+//===----------------------------------------------------------------------===//
+// 16.8.  Vector Iota Instruction
+//===----------------------------------------------------------------------===//
+defm PseudoVIOTA_M: VPseudoUnaryV_M;
+
+//===----------------------------------------------------------------------===//
+// 16.9. Vector Element Index Instruction
+//===----------------------------------------------------------------------===//
+defm PseudoVID : VPseudoMaskNullaryV;
 
 //===----------------------------------------------------------------------===//
 // 17. Vector Permutation Instructions
@@ -2686,32 +2900,54 @@ defm "" : VPatReductionW_VS<"int_riscv_vfwredosum", "PseudoVFWREDOSUM", /*IsFloa
 // 16. Vector Mask Instructions
 //===----------------------------------------------------------------------===//
 
+let Predicates = [HasStdExtV] in {
 //===----------------------------------------------------------------------===//
 // 16.1 Vector Mask-Register Logical Instructions
 //===----------------------------------------------------------------------===//
-let Predicates = [HasStdExtV] in {
-  defm "" : VPatBinaryM_MM<"int_riscv_vmand", "PseudoVMAND">;
-  defm "" : VPatBinaryM_MM<"int_riscv_vmnand", "PseudoVMNAND">;
-  defm "" : VPatBinaryM_MM<"int_riscv_vmandnot", "PseudoVMANDNOT">;
-  defm "" : VPatBinaryM_MM<"int_riscv_vmxor", "PseudoVMXOR">;
-  defm "" : VPatBinaryM_MM<"int_riscv_vmor", "PseudoVMOR">;
-  defm "" : VPatBinaryM_MM<"int_riscv_vmnor", "PseudoVMNOR">;
-  defm "" : VPatBinaryM_MM<"int_riscv_vmornot", "PseudoVMORNOT">;
-  defm "" : VPatBinaryM_MM<"int_riscv_vmxnor", "PseudoVMXNOR">;
-} // Predicates = [HasStdExtV]
+defm "" : VPatBinaryM_MM<"int_riscv_vmand", "PseudoVMAND">;
+defm "" : VPatBinaryM_MM<"int_riscv_vmnand", "PseudoVMNAND">;
+defm "" : VPatBinaryM_MM<"int_riscv_vmandnot", "PseudoVMANDNOT">;
+defm "" : VPatBinaryM_MM<"int_riscv_vmxor", "PseudoVMXOR">;
+defm "" : VPatBinaryM_MM<"int_riscv_vmor", "PseudoVMOR">;
+defm "" : VPatBinaryM_MM<"int_riscv_vmnor", "PseudoVMNOR">;
+defm "" : VPatBinaryM_MM<"int_riscv_vmornot", "PseudoVMORNOT">;
+defm "" : VPatBinaryM_MM<"int_riscv_vmxnor", "PseudoVMXNOR">;
 
 //===----------------------------------------------------------------------===//
 // 16.2. Vector mask population count vpopc
 //===----------------------------------------------------------------------===//
-let Predicates = [HasStdExtV] in {
-  defm "" : VPatMaskUnarySOut<"int_riscv_vpopc", "PseudoVPOPC">;
-} // Predicates = [HasStdExtV]
+defm "" : VPatUnaryS_M<"int_riscv_vpopc", "PseudoVPOPC">;
 
 //===----------------------------------------------------------------------===//
 // 16.3. vfirst find-first-set mask bit
 //===----------------------------------------------------------------------===//
-let Predicates = [HasStdExtV] in {
-  defm "" : VPatMaskUnarySOut<"int_riscv_vfirst", "PseudoVFIRST">;
+defm "" : VPatUnaryS_M<"int_riscv_vfirst", "PseudoVFIRST">;
+
+//===----------------------------------------------------------------------===//
+// 16.4. vmsbf.m set-before-first mask bit
+//===----------------------------------------------------------------------===//
+defm "" : VPatUnaryM_M<"int_riscv_vmsbf", "PseudoVMSBF">;
+
+//===----------------------------------------------------------------------===//
+// 16.5. vmsif.m set-including-first mask bit
+//===----------------------------------------------------------------------===//
+defm "" : VPatUnaryM_M<"int_riscv_vmsif", "PseudoVMSIF">;
+
+//===----------------------------------------------------------------------===//
+// 16.6. vmsof.m set-only-first mask bit
+//===----------------------------------------------------------------------===//
+defm "" : VPatUnaryM_M<"int_riscv_vmsof", "PseudoVMSOF">;
+
+//===----------------------------------------------------------------------===//
+// 16.8.  Vector Iota Instruction
+//===----------------------------------------------------------------------===//
+defm "" : VPatUnaryV_M<"int_riscv_viota", "PseudoVIOTA">;
+
+//===----------------------------------------------------------------------===//
+// 16.9. Vector Element Index Instruction
+//===----------------------------------------------------------------------===//
+defm "" : VPatNullaryV<"int_riscv_vid", "PseudoVID">;
+
 } // Predicates = [HasStdExtV]
 
 //===----------------------------------------------------------------------===//

diff --git a/llvm/test/CodeGen/RISCV/rvv/vid-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vid-rv32.ll
new file mode 100644
index 000000000000..9e2d395a44ec
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vid-rv32.ll
@@ -0,0 +1,545 @@
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i8> @llvm.riscv.vid.nxv1i8(
+  i32);
+
+define <vscale x 1 x i8> @intrinsic_vid_v_nxv1i8(i32 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vid_v_nxv1i8
+; CHECK:       vsetvli {{.*}}, a0, e8,mf8,ta,mu
+; CHECK:       vid.v {{v[0-9]+}}
+  %a = call <vscale x 1 x i8> @llvm.riscv.vid.nxv1i8(
+    i32 %0)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vid.mask.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i8> @intrinsic_vid_mask_v_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vid_mask_v_nxv1i8
+; CHECK:       vsetvli {{.*}}, a0, e8,mf8,ta,mu
+; CHECK:       vid.v {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i8> @llvm.riscv.vid.mask.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vid.nxv2i8(
+  i32);
+
+define <vscale x 2 x i8> @intrinsic_vid_v_nxv2i8(i32 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vid_v_nxv2i8
+; CHECK:       vsetvli {{.*}}, a0, e8,mf4,ta,mu
+; CHECK:       vid.v {{v[0-9]+}}
+  %a = call <vscale x 2 x i8> @llvm.riscv.vid.nxv2i8(
+    i32 %0)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vid.mask.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i8> @intrinsic_vid_mask_v_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vid_mask_v_nxv2i8
+; CHECK:       vsetvli {{.*}}, a0, e8,mf4,ta,mu
+; CHECK:       vid.v {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i8> @llvm.riscv.vid.mask.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vid.nxv4i8(
+  i32);
+
+define <vscale x 4 x i8> @intrinsic_vid_v_nxv4i8(i32 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vid_v_nxv4i8
+; CHECK:       vsetvli {{.*}}, a0, e8,mf2,ta,mu
+; CHECK:       vid.v {{v[0-9]+}}
+  %a = call <vscale x 4 x i8> @llvm.riscv.vid.nxv4i8(
+    i32 %0)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vid.mask.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i8> @intrinsic_vid_mask_v_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vid_mask_v_nxv4i8
+; CHECK:       vsetvli {{.*}}, a0, e8,mf2,ta,mu
+; CHECK:       vid.v {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i8> @llvm.riscv.vid.mask.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vid.nxv8i8(
+  i32);
+
+define <vscale x 8 x i8> @intrinsic_vid_v_nxv8i8(i32 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vid_v_nxv8i8
+; CHECK:       vsetvli {{.*}}, a0, e8,m1,ta,mu
+; CHECK:       vid.v {{v[0-9]+}}
+  %a = call <vscale x 8 x i8> @llvm.riscv.vid.nxv8i8(
+    i32 %0)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vid.mask.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i8> @intrinsic_vid_mask_v_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vid_mask_v_nxv8i8
+; CHECK:       vsetvli {{.*}}, a0, e8,m1,ta,mu
+; CHECK:       vid.v {{v[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i8> @llvm.riscv.vid.mask.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vid.nxv16i8(
+  i32);
+
+define <vscale x 16 x i8> @intrinsic_vid_v_nxv16i8(i32 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vid_v_nxv16i8
+; CHECK:       vsetvli {{.*}}, a0, e8,m2,ta,mu
+; CHECK:       vid.v {{v[0-9]+}}
+  %a = call <vscale x 16 x i8> @llvm.riscv.vid.nxv16i8(
+    i32 %0)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vid.mask.nxv16i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i8> @intrinsic_vid_mask_v_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vid_mask_v_nxv16i8
+; CHECK:       vsetvli {{.*}}, a0, e8,m2,ta,mu
+; CHECK:       vid.v {{v[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i8> @llvm.riscv.vid.mask.nxv16i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vid.nxv32i8(
+  i32);
+
+define <vscale x 32 x i8> @intrinsic_vid_v_nxv32i8(i32 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vid_v_nxv32i8
+; CHECK:       vsetvli {{.*}}, a0, e8,m4,ta,mu
+; CHECK:       vid.v {{v[0-9]+}}
+  %a = call <vscale x 32 x i8> @llvm.riscv.vid.nxv32i8(
+    i32 %0)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vid.mask.nxv32i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i1>,
+  i32);
+
+define <vscale x 32 x i8> @intrinsic_vid_mask_v_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vid_mask_v_nxv32i8
+; CHECK:       vsetvli {{.*}}, a0, e8,m4,ta,mu
+; CHECK:       vid.v {{v[0-9]+}}, v0.t
+  %a = call <vscale x 32 x i8> @llvm.riscv.vid.mask.nxv32i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vid.nxv1i16(
+  i32);
+
+define <vscale x 1 x i16> @intrinsic_vid_v_nxv1i16(i32 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vid_v_nxv1i16
+; CHECK:       vsetvli {{.*}}, a0, e16,mf4,ta,mu
+; CHECK:       vid.v {{v[0-9]+}}
+  %a = call <vscale x 1 x i16> @llvm.riscv.vid.nxv1i16(
+    i32 %0)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vid.mask.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i16> @intrinsic_vid_mask_v_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vid_mask_v_nxv1i16
+; CHECK:       vsetvli {{.*}}, a0, e16,mf4,ta,mu
+; CHECK:       vid.v {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i16> @llvm.riscv.vid.mask.nxv1i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vid.nxv2i16(
+  i32);
+
+define <vscale x 2 x i16> @intrinsic_vid_v_nxv2i16(i32 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vid_v_nxv2i16
+; CHECK:       vsetvli {{.*}}, a0, e16,mf2,ta,mu
+; CHECK:       vid.v {{v[0-9]+}}
+  %a = call <vscale x 2 x i16> @llvm.riscv.vid.nxv2i16(
+    i32 %0)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vid.mask.nxv2i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i16> @intrinsic_vid_mask_v_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vid_mask_v_nxv2i16
+; CHECK:       vsetvli {{.*}}, a0, e16,mf2,ta,mu
+; CHECK:       vid.v {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i16> @llvm.riscv.vid.mask.nxv2i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vid.nxv4i16(
+  i32);
+
+define <vscale x 4 x i16> @intrinsic_vid_v_nxv4i16(i32 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vid_v_nxv4i16
+; CHECK:       vsetvli {{.*}}, a0, e16,m1,ta,mu
+; CHECK:       vid.v {{v[0-9]+}}
+  %a = call <vscale x 4 x i16> @llvm.riscv.vid.nxv4i16(
+    i32 %0)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vid.mask.nxv4i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i16> @intrinsic_vid_mask_v_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vid_mask_v_nxv4i16
+; CHECK:       vsetvli {{.*}}, a0, e16,m1,ta,mu
+; CHECK:       vid.v {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i16> @llvm.riscv.vid.mask.nxv4i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vid.nxv8i16(
+  i32);
+
+define <vscale x 8 x i16> @intrinsic_vid_v_nxv8i16(i32 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vid_v_nxv8i16
+; CHECK:       vsetvli {{.*}}, a0, e16,m2,ta,mu
+; CHECK:       vid.v {{v[0-9]+}}
+  %a = call <vscale x 8 x i16> @llvm.riscv.vid.nxv8i16(
+    i32 %0)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vid.mask.nxv8i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i16> @intrinsic_vid_mask_v_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vid_mask_v_nxv8i16
+; CHECK:       vsetvli {{.*}}, a0, e16,m2,ta,mu
+; CHECK:       vid.v {{v[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i16> @llvm.riscv.vid.mask.nxv8i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vid.nxv16i16(
+  i32);
+
+define <vscale x 16 x i16> @intrinsic_vid_v_nxv16i16(i32 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vid_v_nxv16i16
+; CHECK:       vsetvli {{.*}}, a0, e16,m4,ta,mu
+; CHECK:       vid.v {{v[0-9]+}}
+  %a = call <vscale x 16 x i16> @llvm.riscv.vid.nxv16i16(
+    i32 %0)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vid.mask.nxv16i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i16> @intrinsic_vid_mask_v_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vid_mask_v_nxv16i16
+; CHECK:       vsetvli {{.*}}, a0, e16,m4,ta,mu
+; CHECK:       vid.v {{v[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i16> @llvm.riscv.vid.mask.nxv16i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vid.nxv32i16(
+  i32);
+
+define <vscale x 32 x i16> @intrinsic_vid_v_nxv32i16(i32 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vid_v_nxv32i16
+; CHECK:       vsetvli {{.*}}, a0, e16,m8,ta,mu
+; CHECK:       vid.v {{v[0-9]+}}
+  %a = call <vscale x 32 x i16> @llvm.riscv.vid.nxv32i16(
+    i32 %0)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vid.mask.nxv32i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i1>,
+  i32);
+
+define <vscale x 32 x i16> @intrinsic_vid_mask_v_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vid_mask_v_nxv32i16
+; CHECK:       vsetvli {{.*}}, a0, e16,m8,ta,mu
+; CHECK:       vid.v {{v[0-9]+}}, v0.t
+  %a = call <vscale x 32 x i16> @llvm.riscv.vid.mask.nxv32i16(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vid.nxv1i32(
+  i32);
+
+define <vscale x 1 x i32> @intrinsic_vid_v_nxv1i32(i32 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vid_v_nxv1i32
+; CHECK:       vsetvli {{.*}}, a0, e32,mf2,ta,mu
+; CHECK:       vid.v {{v[0-9]+}}
+  %a = call <vscale x 1 x i32> @llvm.riscv.vid.nxv1i32(
+    i32 %0)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vid.mask.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i32> @intrinsic_vid_mask_v_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vid_mask_v_nxv1i32
+; CHECK:       vsetvli {{.*}}, a0, e32,mf2,ta,mu
+; CHECK:       vid.v {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i32> @llvm.riscv.vid.mask.nxv1i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vid.nxv2i32(
+  i32);
+
+define <vscale x 2 x i32> @intrinsic_vid_v_nxv2i32(i32 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vid_v_nxv2i32
+; CHECK:       vsetvli {{.*}}, a0, e32,m1,ta,mu
+; CHECK:       vid.v {{v[0-9]+}}
+  %a = call <vscale x 2 x i32> @llvm.riscv.vid.nxv2i32(
+    i32 %0)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vid.mask.nxv2i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i32> @intrinsic_vid_mask_v_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vid_mask_v_nxv2i32
+; CHECK:       vsetvli {{.*}}, a0, e32,m1,ta,mu
+; CHECK:       vid.v {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i32> @llvm.riscv.vid.mask.nxv2i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vid.nxv4i32(
+  i32);
+
+define <vscale x 4 x i32> @intrinsic_vid_v_nxv4i32(i32 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vid_v_nxv4i32
+; CHECK:       vsetvli {{.*}}, a0, e32,m2,ta,mu
+; CHECK:       vid.v {{v[0-9]+}}
+  %a = call <vscale x 4 x i32> @llvm.riscv.vid.nxv4i32(
+    i32 %0)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vid.mask.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i32> @intrinsic_vid_mask_v_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vid_mask_v_nxv4i32
+; CHECK:       vsetvli {{.*}}, a0, e32,m2,ta,mu
+; CHECK:       vid.v {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i32> @llvm.riscv.vid.mask.nxv4i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vid.nxv8i32(
+  i32);
+
+define <vscale x 8 x i32> @intrinsic_vid_v_nxv8i32(i32 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vid_v_nxv8i32
+; CHECK:       vsetvli {{.*}}, a0, e32,m4,ta,mu
+; CHECK:       vid.v {{v[0-9]+}}
+  %a = call <vscale x 8 x i32> @llvm.riscv.vid.nxv8i32(
+    i32 %0)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vid.mask.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i32> @intrinsic_vid_mask_v_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vid_mask_v_nxv8i32
+; CHECK:       vsetvli {{.*}}, a0, e32,m4,ta,mu
+; CHECK:       vid.v {{v[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i32> @llvm.riscv.vid.mask.nxv8i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vid.nxv16i32(
+  i32);
+
+define <vscale x 16 x i32> @intrinsic_vid_v_nxv16i32(i32 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vid_v_nxv16i32
+; CHECK:       vsetvli {{.*}}, a0, e32,m8,ta,mu
+; CHECK:       vid.v {{v[0-9]+}}
+  %a = call <vscale x 16 x i32> @llvm.riscv.vid.nxv16i32(
+    i32 %0)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vid.mask.nxv16i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i32> @intrinsic_vid_mask_v_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vid_mask_v_nxv16i32
+; CHECK:       vsetvli {{.*}}, a0, e32,m8,ta,mu
+; CHECK:       vid.v {{v[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i32> @llvm.riscv.vid.mask.nxv16i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 16 x i32> %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vid-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vid-rv64.ll
new file mode 100644
index 000000000000..53aa6aa02104
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vid-rv64.ll
@@ -0,0 +1,673 @@
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i8> @llvm.riscv.vid.nxv1i8(
+  i64);
+
+define <vscale x 1 x i8> @intrinsic_vid_v_nxv1i8(i64 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vid_v_nxv1i8
+; CHECK:       vsetvli {{.*}}, a0, e8,mf8,ta,mu
+; CHECK:       vid.v {{v[0-9]+}}
+  %a = call <vscale x 1 x i8> @llvm.riscv.vid.nxv1i8(
+    i64 %0)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vid.mask.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i8> @intrinsic_vid_mask_v_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vid_mask_v_nxv1i8
+; CHECK:       vsetvli {{.*}}, a0, e8,mf8,ta,mu
+; CHECK:       vid.v {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i8> @llvm.riscv.vid.mask.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vid.nxv2i8(
+  i64);
+
+define <vscale x 2 x i8> @intrinsic_vid_v_nxv2i8(i64 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vid_v_nxv2i8
+; CHECK:       vsetvli {{.*}}, a0, e8,mf4,ta,mu
+; CHECK:       vid.v {{v[0-9]+}}
+  %a = call <vscale x 2 x i8> @llvm.riscv.vid.nxv2i8(
+    i64 %0)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vid.mask.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i8> @intrinsic_vid_mask_v_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vid_mask_v_nxv2i8
+; CHECK:       vsetvli {{.*}}, a0, e8,mf4,ta,mu
+; CHECK:       vid.v {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i8> @llvm.riscv.vid.mask.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vid.nxv4i8(
+  i64);
+
+define <vscale x 4 x i8> @intrinsic_vid_v_nxv4i8(i64 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vid_v_nxv4i8
+; CHECK:       vsetvli {{.*}}, a0, e8,mf2,ta,mu
+; CHECK:       vid.v {{v[0-9]+}}
+  %a = call <vscale x 4 x i8> @llvm.riscv.vid.nxv4i8(
+    i64 %0)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vid.mask.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i8> @intrinsic_vid_mask_v_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vid_mask_v_nxv4i8
+; CHECK:       vsetvli {{.*}}, a0, e8,mf2,ta,mu
+; CHECK:       vid.v {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i8> @llvm.riscv.vid.mask.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vid.nxv8i8(
+  i64);
+
+define <vscale x 8 x i8> @intrinsic_vid_v_nxv8i8(i64 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vid_v_nxv8i8
+; CHECK:       vsetvli {{.*}}, a0, e8,m1,ta,mu
+; CHECK:       vid.v {{v[0-9]+}}
+  %a = call <vscale x 8 x i8> @llvm.riscv.vid.nxv8i8(
+    i64 %0)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vid.mask.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i8> @intrinsic_vid_mask_v_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vid_mask_v_nxv8i8
+; CHECK:       vsetvli {{.*}}, a0, e8,m1,ta,mu
+; CHECK:       vid.v {{v[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i8> @llvm.riscv.vid.mask.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vid.nxv16i8(
+  i64);
+
+define <vscale x 16 x i8> @intrinsic_vid_v_nxv16i8(i64 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vid_v_nxv16i8
+; CHECK:       vsetvli {{.*}}, a0, e8,m2,ta,mu
+; CHECK:       vid.v {{v[0-9]+}}
+  %a = call <vscale x 16 x i8> @llvm.riscv.vid.nxv16i8(
+    i64 %0)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vid.mask.nxv16i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i8> @intrinsic_vid_mask_v_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vid_mask_v_nxv16i8
+; CHECK:       vsetvli {{.*}}, a0, e8,m2,ta,mu
+; CHECK:       vid.v {{v[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i8> @llvm.riscv.vid.mask.nxv16i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vid.nxv32i8(
+  i64);
+
+define <vscale x 32 x i8> @intrinsic_vid_v_nxv32i8(i64 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vid_v_nxv32i8
+; CHECK:       vsetvli {{.*}}, a0, e8,m4,ta,mu
+; CHECK:       vid.v {{v[0-9]+}}
+  %a = call <vscale x 32 x i8> @llvm.riscv.vid.nxv32i8(
+    i64 %0)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vid.mask.nxv32i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i1>,
+  i64);
+
+define <vscale x 32 x i8> @intrinsic_vid_mask_v_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vid_mask_v_nxv32i8
+; CHECK:       vsetvli {{.*}}, a0, e8,m4,ta,mu
+; CHECK:       vid.v {{v[0-9]+}}, v0.t
+  %a = call <vscale x 32 x i8> @llvm.riscv.vid.mask.nxv32i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vid.nxv1i16(
+  i64);
+
+define <vscale x 1 x i16> @intrinsic_vid_v_nxv1i16(i64 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vid_v_nxv1i16
+; CHECK:       vsetvli {{.*}}, a0, e16,mf4,ta,mu
+; CHECK:       vid.v {{v[0-9]+}}
+  %a = call <vscale x 1 x i16> @llvm.riscv.vid.nxv1i16(
+    i64 %0)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vid.mask.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i16> @intrinsic_vid_mask_v_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vid_mask_v_nxv1i16
+; CHECK:       vsetvli {{.*}}, a0, e16,mf4,ta,mu
+; CHECK:       vid.v {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i16> @llvm.riscv.vid.mask.nxv1i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vid.nxv2i16(
+  i64);
+
+define <vscale x 2 x i16> @intrinsic_vid_v_nxv2i16(i64 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vid_v_nxv2i16
+; CHECK:       vsetvli {{.*}}, a0, e16,mf2,ta,mu
+; CHECK:       vid.v {{v[0-9]+}}
+  %a = call <vscale x 2 x i16> @llvm.riscv.vid.nxv2i16(
+    i64 %0)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vid.mask.nxv2i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i16> @intrinsic_vid_mask_v_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vid_mask_v_nxv2i16
+; CHECK:       vsetvli {{.*}}, a0, e16,mf2,ta,mu
+; CHECK:       vid.v {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i16> @llvm.riscv.vid.mask.nxv2i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vid.nxv4i16(
+  i64);
+
+define <vscale x 4 x i16> @intrinsic_vid_v_nxv4i16(i64 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vid_v_nxv4i16
+; CHECK:       vsetvli {{.*}}, a0, e16,m1,ta,mu
+; CHECK:       vid.v {{v[0-9]+}}
+  %a = call <vscale x 4 x i16> @llvm.riscv.vid.nxv4i16(
+    i64 %0)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vid.mask.nxv4i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i16> @intrinsic_vid_mask_v_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vid_mask_v_nxv4i16
+; CHECK:       vsetvli {{.*}}, a0, e16,m1,ta,mu
+; CHECK:       vid.v {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i16> @llvm.riscv.vid.mask.nxv4i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vid.nxv8i16(
+  i64);
+
+define <vscale x 8 x i16> @intrinsic_vid_v_nxv8i16(i64 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vid_v_nxv8i16
+; CHECK:       vsetvli {{.*}}, a0, e16,m2,ta,mu
+; CHECK:       vid.v {{v[0-9]+}}
+  %a = call <vscale x 8 x i16> @llvm.riscv.vid.nxv8i16(
+    i64 %0)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vid.mask.nxv8i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i16> @intrinsic_vid_mask_v_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vid_mask_v_nxv8i16
+; CHECK:       vsetvli {{.*}}, a0, e16,m2,ta,mu
+; CHECK:       vid.v {{v[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i16> @llvm.riscv.vid.mask.nxv8i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vid.nxv16i16(
+  i64);
+
+define <vscale x 16 x i16> @intrinsic_vid_v_nxv16i16(i64 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vid_v_nxv16i16
+; CHECK:       vsetvli {{.*}}, a0, e16,m4,ta,mu
+; CHECK:       vid.v {{v[0-9]+}}
+  %a = call <vscale x 16 x i16> @llvm.riscv.vid.nxv16i16(
+    i64 %0)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vid.mask.nxv16i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i16> @intrinsic_vid_mask_v_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vid_mask_v_nxv16i16
+; CHECK:       vsetvli {{.*}}, a0, e16,m4,ta,mu
+; CHECK:       vid.v {{v[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i16> @llvm.riscv.vid.mask.nxv16i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vid.nxv32i16(
+  i64);
+
+define <vscale x 32 x i16> @intrinsic_vid_v_nxv32i16(i64 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vid_v_nxv32i16
+; CHECK:       vsetvli {{.*}}, a0, e16,m8,ta,mu
+; CHECK:       vid.v {{v[0-9]+}}
+  %a = call <vscale x 32 x i16> @llvm.riscv.vid.nxv32i16(
+    i64 %0)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vid.mask.nxv32i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i1>,
+  i64);
+
+define <vscale x 32 x i16> @intrinsic_vid_mask_v_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vid_mask_v_nxv32i16
+; CHECK:       vsetvli {{.*}}, a0, e16,m8,ta,mu
+; CHECK:       vid.v {{v[0-9]+}}, v0.t
+  %a = call <vscale x 32 x i16> @llvm.riscv.vid.mask.nxv32i16(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vid.nxv1i32(
+  i64);
+
+define <vscale x 1 x i32> @intrinsic_vid_v_nxv1i32(i64 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vid_v_nxv1i32
+; CHECK:       vsetvli {{.*}}, a0, e32,mf2,ta,mu
+; CHECK:       vid.v {{v[0-9]+}}
+  %a = call <vscale x 1 x i32> @llvm.riscv.vid.nxv1i32(
+    i64 %0)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vid.mask.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i32> @intrinsic_vid_mask_v_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vid_mask_v_nxv1i32
+; CHECK:       vsetvli {{.*}}, a0, e32,mf2,ta,mu
+; CHECK:       vid.v {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i32> @llvm.riscv.vid.mask.nxv1i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vid.nxv2i32(
+  i64);
+
+define <vscale x 2 x i32> @intrinsic_vid_v_nxv2i32(i64 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vid_v_nxv2i32
+; CHECK:       vsetvli {{.*}}, a0, e32,m1,ta,mu
+; CHECK:       vid.v {{v[0-9]+}}
+  %a = call <vscale x 2 x i32> @llvm.riscv.vid.nxv2i32(
+    i64 %0)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vid.mask.nxv2i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i32> @intrinsic_vid_mask_v_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vid_mask_v_nxv2i32
+; CHECK:       vsetvli {{.*}}, a0, e32,m1,ta,mu
+; CHECK:       vid.v {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i32> @llvm.riscv.vid.mask.nxv2i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vid.nxv4i32(
+  i64);
+
+define <vscale x 4 x i32> @intrinsic_vid_v_nxv4i32(i64 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vid_v_nxv4i32
+; CHECK:       vsetvli {{.*}}, a0, e32,m2,ta,mu
+; CHECK:       vid.v {{v[0-9]+}}
+  %a = call <vscale x 4 x i32> @llvm.riscv.vid.nxv4i32(
+    i64 %0)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vid.mask.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i32> @intrinsic_vid_mask_v_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vid_mask_v_nxv4i32
+; CHECK:       vsetvli {{.*}}, a0, e32,m2,ta,mu
+; CHECK:       vid.v {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i32> @llvm.riscv.vid.mask.nxv4i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vid.nxv8i32(
+  i64);
+
+define <vscale x 8 x i32> @intrinsic_vid_v_nxv8i32(i64 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vid_v_nxv8i32
+; CHECK:       vsetvli {{.*}}, a0, e32,m4,ta,mu
+; CHECK:       vid.v {{v[0-9]+}}
+  %a = call <vscale x 8 x i32> @llvm.riscv.vid.nxv8i32(
+    i64 %0)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vid.mask.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i32> @intrinsic_vid_mask_v_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vid_mask_v_nxv8i32
+; CHECK:       vsetvli {{.*}}, a0, e32,m4,ta,mu
+; CHECK:       vid.v {{v[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i32> @llvm.riscv.vid.mask.nxv8i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vid.nxv16i32(
+  i64);
+
+define <vscale x 16 x i32> @intrinsic_vid_v_nxv16i32(i64 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vid_v_nxv16i32
+; CHECK:       vsetvli {{.*}}, a0, e32,m8,ta,mu
+; CHECK:       vid.v {{v[0-9]+}}
+  %a = call <vscale x 16 x i32> @llvm.riscv.vid.nxv16i32(
+    i64 %0)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vid.mask.nxv16i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i32> @intrinsic_vid_mask_v_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vid_mask_v_nxv16i32
+; CHECK:       vsetvli {{.*}}, a0, e32,m8,ta,mu
+; CHECK:       vid.v {{v[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i32> @llvm.riscv.vid.mask.nxv16i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vid.nxv1i64(
+  i64);
+
+define <vscale x 1 x i64> @intrinsic_vid_v_nxv1i64(i64 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vid_v_nxv1i64
+; CHECK:       vsetvli {{.*}}, a0, e64,m1,ta,mu
+; CHECK:       vid.v {{v[0-9]+}}
+  %a = call <vscale x 1 x i64> @llvm.riscv.vid.nxv1i64(
+    i64 %0)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vid.mask.nxv1i64(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i64> @intrinsic_vid_mask_v_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vid_mask_v_nxv1i64
+; CHECK:       vsetvli {{.*}}, a0, e64,m1,ta,mu
+; CHECK:       vid.v {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i64> @llvm.riscv.vid.mask.nxv1i64(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vid.nxv2i64(
+  i64);
+
+define <vscale x 2 x i64> @intrinsic_vid_v_nxv2i64(i64 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vid_v_nxv2i64
+; CHECK:       vsetvli {{.*}}, a0, e64,m2,ta,mu
+; CHECK:       vid.v {{v[0-9]+}}
+  %a = call <vscale x 2 x i64> @llvm.riscv.vid.nxv2i64(
+    i64 %0)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vid.mask.nxv2i64(
+  <vscale x 2 x i64>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i64> @intrinsic_vid_mask_v_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vid_mask_v_nxv2i64
+; CHECK:       vsetvli {{.*}}, a0, e64,m2,ta,mu
+; CHECK:       vid.v {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i64> @llvm.riscv.vid.mask.nxv2i64(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vid.nxv4i64(
+  i64);
+
+define <vscale x 4 x i64> @intrinsic_vid_v_nxv4i64(i64 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vid_v_nxv4i64
+; CHECK:       vsetvli {{.*}}, a0, e64,m4,ta,mu
+; CHECK:       vid.v {{v[0-9]+}}
+  %a = call <vscale x 4 x i64> @llvm.riscv.vid.nxv4i64(
+    i64 %0)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vid.mask.nxv4i64(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i64> @intrinsic_vid_mask_v_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vid_mask_v_nxv4i64
+; CHECK:       vsetvli {{.*}}, a0, e64,m4,ta,mu
+; CHECK:       vid.v {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i64> @llvm.riscv.vid.mask.nxv4i64(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vid.nxv8i64(
+  i64);
+
+define <vscale x 8 x i64> @intrinsic_vid_v_nxv8i64(i64 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vid_v_nxv8i64
+; CHECK:       vsetvli {{.*}}, a0, e64,m8,ta,mu
+; CHECK:       vid.v {{v[0-9]+}}
+  %a = call <vscale x 8 x i64> @llvm.riscv.vid.nxv8i64(
+    i64 %0)
+
+  ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vid.mask.nxv8i64(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i64> @intrinsic_vid_mask_v_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vid_mask_v_nxv8i64
+; CHECK:       vsetvli {{.*}}, a0, e64,m8,ta,mu
+; CHECK:       vid.v {{v[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i64> @llvm.riscv.vid.mask.nxv8i64(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 8 x i64> %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/viota-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/viota-rv32.ll
new file mode 100644
index 000000000000..bc1b1c56bb63
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/viota-rv32.ll
@@ -0,0 +1,722 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i8> @llvm.riscv.viota.nxv1i8(
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i8> @intrinsic_viota_m_nxv1i8_nxv1i1(<vscale x 1 x i1> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_viota_m_nxv1i8_nxv1i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
+; CHECK-NEXT:    viota.m v16, v0
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.viota.nxv1i8(
+    <vscale x 1 x i1> %0,
+    i32 %1)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.viota.mask.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i1>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i8> @intrinsic_viota_mask_m_nxv1i8_nxv1i1(<vscale x 1 x i8> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_viota_mask_m_nxv1i8_nxv1i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
+; CHECK-NEXT:    viota.m v16, v0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.viota.mask.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i1> %1,
+    <vscale x 1 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.viota.nxv2i8(
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i8> @intrinsic_viota_m_nxv2i8_nxv2i1(<vscale x 2 x i1> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_viota_m_nxv2i8_nxv2i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
+; CHECK-NEXT:    viota.m v16, v0
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.viota.nxv2i8(
+    <vscale x 2 x i1> %0,
+    i32 %1)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.viota.mask.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i1>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i8> @intrinsic_viota_mask_m_nxv2i8_nxv2i1(<vscale x 2 x i8> %0, <vscale x 2 x i1> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_viota_mask_m_nxv2i8_nxv2i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
+; CHECK-NEXT:    viota.m v16, v0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.viota.mask.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i1> %1,
+    <vscale x 2 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.viota.nxv4i8(
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i8> @intrinsic_viota_m_nxv4i8_nxv4i1(<vscale x 4 x i1> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_viota_m_nxv4i8_nxv4i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
+; CHECK-NEXT:    viota.m v16, v0
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.viota.nxv4i8(
+    <vscale x 4 x i1> %0,
+    i32 %1)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.viota.mask.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i1>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i8> @intrinsic_viota_mask_m_nxv4i8_nxv4i1(<vscale x 4 x i8> %0, <vscale x 4 x i1> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_viota_mask_m_nxv4i8_nxv4i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
+; CHECK-NEXT:    viota.m v16, v0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.viota.mask.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i1> %1,
+    <vscale x 4 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.viota.nxv8i8(
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i8> @intrinsic_viota_m_nxv8i8_nxv8i1(<vscale x 8 x i1> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_viota_m_nxv8i8_nxv8i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
+; CHECK-NEXT:    viota.m v16, v0
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.viota.nxv8i8(
+    <vscale x 8 x i1> %0,
+    i32 %1)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.viota.mask.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i1>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i8> @intrinsic_viota_mask_m_nxv8i8_nxv8i1(<vscale x 8 x i8> %0, <vscale x 8 x i1> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_viota_mask_m_nxv8i8_nxv8i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
+; CHECK-NEXT:    viota.m v16, v0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.viota.mask.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i1> %1,
+    <vscale x 8 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.viota.nxv16i8(
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i8> @intrinsic_viota_m_nxv16i8_nxv16i1(<vscale x 16 x i1> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_viota_m_nxv16i8_nxv16i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
+; CHECK-NEXT:    viota.m v16, v0
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.viota.nxv16i8(
+    <vscale x 16 x i1> %0,
+    i32 %1)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.viota.mask.nxv16i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i1>,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i8> @intrinsic_viota_mask_m_nxv16i8_nxv16i1(<vscale x 16 x i8> %0, <vscale x 16 x i1> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_viota_mask_m_nxv16i8_nxv16i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
+; CHECK-NEXT:    viota.m v16, v0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.viota.mask.nxv16i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i1> %1,
+    <vscale x 16 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.viota.nxv32i8(
+  <vscale x 32 x i1>,
+  i32);
+
+define <vscale x 32 x i8> @intrinsic_viota_m_nxv32i8_nxv32i1(<vscale x 32 x i1> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_viota_m_nxv32i8_nxv32i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
+; CHECK-NEXT:    viota.m v16, v0
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 32 x i8> @llvm.riscv.viota.nxv32i8(
+    <vscale x 32 x i1> %0,
+    i32 %1)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.viota.mask.nxv32i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i1>,
+  <vscale x 32 x i1>,
+  i32);
+
+define <vscale x 32 x i8> @intrinsic_viota_mask_m_nxv32i8_nxv32i1(<vscale x 32 x i8> %0, <vscale x 32 x i1> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_viota_mask_m_nxv32i8_nxv32i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
+; CHECK-NEXT:    viota.m v16, v0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 32 x i8> @llvm.riscv.viota.mask.nxv32i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i1> %1,
+    <vscale x 32 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.viota.nxv64i8(
+  <vscale x 64 x i1>,
+  i32);
+
+define <vscale x 64 x i8> @intrinsic_viota_m_nxv64i8_nxv64i1(<vscale x 64 x i1> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_viota_m_nxv64i8_nxv64i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
+; CHECK-NEXT:    viota.m v16, v0
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 64 x i8> @llvm.riscv.viota.nxv64i8(
+    <vscale x 64 x i1> %0,
+    i32 %1)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.viota.mask.nxv64i8(
+  <vscale x 64 x i8>,
+  <vscale x 64 x i1>,
+  <vscale x 64 x i1>,
+  i32);
+
+define <vscale x 64 x i8> @intrinsic_viota_mask_m_nxv64i8_nxv64i1(<vscale x 64 x i8> %0, <vscale x 64 x i1> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_viota_mask_m_nxv64i8_nxv64i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
+; CHECK-NEXT:    viota.m v16, v0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 64 x i8> @llvm.riscv.viota.mask.nxv64i8(
+    <vscale x 64 x i8> %0,
+    <vscale x 64 x i1> %1,
+    <vscale x 64 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.viota.nxv1i16(
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i16> @intrinsic_viota_m_nxv1i16_nxv1i1(<vscale x 1 x i1> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_viota_m_nxv1i16_nxv1i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    viota.m v16, v0
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.viota.nxv1i16(
+    <vscale x 1 x i1> %0,
+    i32 %1)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.viota.mask.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i1>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i16> @intrinsic_viota_mask_m_nxv1i16_nxv1i1(<vscale x 1 x i16> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_viota_mask_m_nxv1i16_nxv1i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    viota.m v16, v0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.viota.mask.nxv1i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i1> %1,
+    <vscale x 1 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.viota.nxv2i16(
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i16> @intrinsic_viota_m_nxv2i16_nxv2i1(<vscale x 2 x i1> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_viota_m_nxv2i16_nxv2i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    viota.m v16, v0
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i16> @llvm.riscv.viota.nxv2i16(
+    <vscale x 2 x i1> %0,
+    i32 %1)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.viota.mask.nxv2i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i1>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i16> @intrinsic_viota_mask_m_nxv2i16_nxv2i1(<vscale x 2 x i16> %0, <vscale x 2 x i1> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_viota_mask_m_nxv2i16_nxv2i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    viota.m v16, v0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i16> @llvm.riscv.viota.mask.nxv2i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i1> %1,
+    <vscale x 2 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.viota.nxv4i16(
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i16> @intrinsic_viota_m_nxv4i16_nxv4i1(<vscale x 4 x i1> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_viota_m_nxv4i16_nxv4i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    viota.m v16, v0
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i16> @llvm.riscv.viota.nxv4i16(
+    <vscale x 4 x i1> %0,
+    i32 %1)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.viota.mask.nxv4i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i1>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i16> @intrinsic_viota_mask_m_nxv4i16_nxv4i1(<vscale x 4 x i16> %0, <vscale x 4 x i1> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_viota_mask_m_nxv4i16_nxv4i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    viota.m v16, v0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i16> @llvm.riscv.viota.mask.nxv4i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i1> %1,
+    <vscale x 4 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.viota.nxv8i16(
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i16> @intrinsic_viota_m_nxv8i16_nxv8i1(<vscale x 8 x i1> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_viota_m_nxv8i16_nxv8i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    viota.m v16, v0
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i16> @llvm.riscv.viota.nxv8i16(
+    <vscale x 8 x i1> %0,
+    i32 %1)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.viota.mask.nxv8i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i1>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i16> @intrinsic_viota_mask_m_nxv8i16_nxv8i1(<vscale x 8 x i16> %0, <vscale x 8 x i1> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_viota_mask_m_nxv8i16_nxv8i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    viota.m v16, v0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i16> @llvm.riscv.viota.mask.nxv8i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i1> %1,
+    <vscale x 8 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.viota.nxv16i16(
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i16> @intrinsic_viota_m_nxv16i16_nxv16i1(<vscale x 16 x i1> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_viota_m_nxv16i16_nxv16i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
+; CHECK-NEXT:    viota.m v16, v0
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x i16> @llvm.riscv.viota.nxv16i16(
+    <vscale x 16 x i1> %0,
+    i32 %1)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.viota.mask.nxv16i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i1>,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i16> @intrinsic_viota_mask_m_nxv16i16_nxv16i1(<vscale x 16 x i16> %0, <vscale x 16 x i1> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_viota_mask_m_nxv16i16_nxv16i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
+; CHECK-NEXT:    viota.m v16, v0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x i16> @llvm.riscv.viota.mask.nxv16i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i1> %1,
+    <vscale x 16 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.viota.nxv32i16(
+  <vscale x 32 x i1>,
+  i32);
+
+define <vscale x 32 x i16> @intrinsic_viota_m_nxv32i16_nxv32i1(<vscale x 32 x i1> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_viota_m_nxv32i16_nxv32i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
+; CHECK-NEXT:    viota.m v16, v0
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 32 x i16> @llvm.riscv.viota.nxv32i16(
+    <vscale x 32 x i1> %0,
+    i32 %1)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.viota.mask.nxv32i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i1>,
+  <vscale x 32 x i1>,
+  i32);
+
+define <vscale x 32 x i16> @intrinsic_viota_mask_m_nxv32i16_nxv32i1(<vscale x 32 x i16> %0, <vscale x 32 x i1> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_viota_mask_m_nxv32i16_nxv32i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
+; CHECK-NEXT:    viota.m v16, v0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 32 x i16> @llvm.riscv.viota.mask.nxv32i16(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i1> %1,
+    <vscale x 32 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.viota.nxv1i32(
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i32> @intrinsic_viota_m_nxv1i32_nxv1i1(<vscale x 1 x i1> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_viota_m_nxv1i32_nxv1i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    viota.m v16, v0
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.viota.nxv1i32(
+    <vscale x 1 x i1> %0,
+    i32 %1)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.viota.mask.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i1>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i32> @intrinsic_viota_mask_m_nxv1i32_nxv1i1(<vscale x 1 x i32> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_viota_mask_m_nxv1i32_nxv1i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    viota.m v16, v0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.viota.mask.nxv1i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i1> %1,
+    <vscale x 1 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.viota.nxv2i32(
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i32> @intrinsic_viota_m_nxv2i32_nxv2i1(<vscale x 2 x i1> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_viota_m_nxv2i32_nxv2i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    viota.m v16, v0
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.viota.nxv2i32(
+    <vscale x 2 x i1> %0,
+    i32 %1)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.viota.mask.nxv2i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i1>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i32> @intrinsic_viota_mask_m_nxv2i32_nxv2i1(<vscale x 2 x i32> %0, <vscale x 2 x i1> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_viota_mask_m_nxv2i32_nxv2i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    viota.m v16, v0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.viota.mask.nxv2i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i1> %1,
+    <vscale x 2 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.viota.nxv4i32(
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i32> @intrinsic_viota_m_nxv4i32_nxv4i1(<vscale x 4 x i1> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_viota_m_nxv4i32_nxv4i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    viota.m v16, v0
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.viota.nxv4i32(
+    <vscale x 4 x i1> %0,
+    i32 %1)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.viota.mask.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i1>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i32> @intrinsic_viota_mask_m_nxv4i32_nxv4i1(<vscale x 4 x i32> %0, <vscale x 4 x i1> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_viota_mask_m_nxv4i32_nxv4i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    viota.m v16, v0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.viota.mask.nxv4i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i1> %1,
+    <vscale x 4 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.viota.nxv8i32(
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i32> @intrinsic_viota_m_nxv8i32_nxv8i1(<vscale x 8 x i1> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_viota_m_nxv8i32_nxv8i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
+; CHECK-NEXT:    viota.m v16, v0
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.viota.nxv8i32(
+    <vscale x 8 x i1> %0,
+    i32 %1)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.viota.mask.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i1>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i32> @intrinsic_viota_mask_m_nxv8i32_nxv8i1(<vscale x 8 x i32> %0, <vscale x 8 x i1> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_viota_mask_m_nxv8i32_nxv8i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
+; CHECK-NEXT:    viota.m v16, v0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.viota.mask.nxv8i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i1> %1,
+    <vscale x 8 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.viota.nxv16i32(
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i32> @intrinsic_viota_m_nxv16i32_nxv16i1(<vscale x 16 x i1> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_viota_m_nxv16i32_nxv16i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
+; CHECK-NEXT:    viota.m v16, v0
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.viota.nxv16i32(
+    <vscale x 16 x i1> %0,
+    i32 %1)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.viota.mask.nxv16i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i1>,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i32> @intrinsic_viota_mask_m_nxv16i32_nxv16i1(<vscale x 16 x i32> %0, <vscale x 16 x i1> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_viota_mask_m_nxv16i32_nxv16i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
+; CHECK-NEXT:    viota.m v16, v0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.viota.mask.nxv16i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i1> %1,
+    <vscale x 16 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 16 x i32> %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/viota-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/viota-rv64.ll
new file mode 100644
index 000000000000..7a2783c29a1e
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/viota-rv64.ll
@@ -0,0 +1,882 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i8> @llvm.riscv.viota.nxv1i8(
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i8> @intrinsic_viota_m_nxv1i8_nxv1i1(<vscale x 1 x i1> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_viota_m_nxv1i8_nxv1i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
+; CHECK-NEXT:    viota.m v16, v0
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.viota.nxv1i8(
+    <vscale x 1 x i1> %0,
+    i64 %1)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.viota.mask.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i1>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i8> @intrinsic_viota_mask_m_nxv1i8_nxv1i1(<vscale x 1 x i8> %0, <vscale x 1 x i1> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_viota_mask_m_nxv1i8_nxv1i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
+; CHECK-NEXT:    viota.m v16, v0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.viota.mask.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i1> %1,
+    <vscale x 1 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.viota.nxv2i8(
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i8> @intrinsic_viota_m_nxv2i8_nxv2i1(<vscale x 2 x i1> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_viota_m_nxv2i8_nxv2i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
+; CHECK-NEXT:    viota.m v16, v0
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.viota.nxv2i8(
+    <vscale x 2 x i1> %0,
+    i64 %1)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.viota.mask.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i1>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i8> @intrinsic_viota_mask_m_nxv2i8_nxv2i1(<vscale x 2 x i8> %0, <vscale x 2 x i1> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_viota_mask_m_nxv2i8_nxv2i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
+; CHECK-NEXT:    viota.m v16, v0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.viota.mask.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i1> %1,
+    <vscale x 2 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.viota.nxv4i8(
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i8> @intrinsic_viota_m_nxv4i8_nxv4i1(<vscale x 4 x i1> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_viota_m_nxv4i8_nxv4i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
+; CHECK-NEXT:    viota.m v16, v0
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.viota.nxv4i8(
+    <vscale x 4 x i1> %0,
+    i64 %1)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.viota.mask.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i1>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i8> @intrinsic_viota_mask_m_nxv4i8_nxv4i1(<vscale x 4 x i8> %0, <vscale x 4 x i1> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_viota_mask_m_nxv4i8_nxv4i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
+; CHECK-NEXT:    viota.m v16, v0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.viota.mask.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i1> %1,
+    <vscale x 4 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.viota.nxv8i8(
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i8> @intrinsic_viota_m_nxv8i8_nxv8i1(<vscale x 8 x i1> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_viota_m_nxv8i8_nxv8i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
+; CHECK-NEXT:    viota.m v16, v0
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.viota.nxv8i8(
+    <vscale x 8 x i1> %0,
+    i64 %1)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.viota.mask.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i1>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i8> @intrinsic_viota_mask_m_nxv8i8_nxv8i1(<vscale x 8 x i8> %0, <vscale x 8 x i1> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_viota_mask_m_nxv8i8_nxv8i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
+; CHECK-NEXT:    viota.m v16, v0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.viota.mask.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i1> %1,
+    <vscale x 8 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.viota.nxv16i8(
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i8> @intrinsic_viota_m_nxv16i8_nxv16i1(<vscale x 16 x i1> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_viota_m_nxv16i8_nxv16i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
+; CHECK-NEXT:    viota.m v16, v0
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.viota.nxv16i8(
+    <vscale x 16 x i1> %0,
+    i64 %1)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.viota.mask.nxv16i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i1>,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i8> @intrinsic_viota_mask_m_nxv16i8_nxv16i1(<vscale x 16 x i8> %0, <vscale x 16 x i1> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_viota_mask_m_nxv16i8_nxv16i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
+; CHECK-NEXT:    viota.m v16, v0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.viota.mask.nxv16i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i1> %1,
+    <vscale x 16 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.viota.nxv32i8(
+  <vscale x 32 x i1>,
+  i64);
+
+define <vscale x 32 x i8> @intrinsic_viota_m_nxv32i8_nxv32i1(<vscale x 32 x i1> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_viota_m_nxv32i8_nxv32i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
+; CHECK-NEXT:    viota.m v16, v0
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 32 x i8> @llvm.riscv.viota.nxv32i8(
+    <vscale x 32 x i1> %0,
+    i64 %1)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.viota.mask.nxv32i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i1>,
+  <vscale x 32 x i1>,
+  i64);
+
+define <vscale x 32 x i8> @intrinsic_viota_mask_m_nxv32i8_nxv32i1(<vscale x 32 x i8> %0, <vscale x 32 x i1> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_viota_mask_m_nxv32i8_nxv32i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
+; CHECK-NEXT:    viota.m v16, v0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 32 x i8> @llvm.riscv.viota.mask.nxv32i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i1> %1,
+    <vscale x 32 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.viota.nxv64i8(
+  <vscale x 64 x i1>,
+  i64);
+
+define <vscale x 64 x i8> @intrinsic_viota_m_nxv64i8_nxv64i1(<vscale x 64 x i1> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_viota_m_nxv64i8_nxv64i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
+; CHECK-NEXT:    viota.m v16, v0
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 64 x i8> @llvm.riscv.viota.nxv64i8(
+    <vscale x 64 x i1> %0,
+    i64 %1)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.viota.mask.nxv64i8(
+  <vscale x 64 x i8>,
+  <vscale x 64 x i1>,
+  <vscale x 64 x i1>,
+  i64);
+
+define <vscale x 64 x i8> @intrinsic_viota_mask_m_nxv64i8_nxv64i1(<vscale x 64 x i8> %0, <vscale x 64 x i1> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_viota_mask_m_nxv64i8_nxv64i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
+; CHECK-NEXT:    viota.m v16, v0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 64 x i8> @llvm.riscv.viota.mask.nxv64i8(
+    <vscale x 64 x i8> %0,
+    <vscale x 64 x i1> %1,
+    <vscale x 64 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.viota.nxv1i16(
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i16> @intrinsic_viota_m_nxv1i16_nxv1i1(<vscale x 1 x i1> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_viota_m_nxv1i16_nxv1i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    viota.m v16, v0
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.viota.nxv1i16(
+    <vscale x 1 x i1> %0,
+    i64 %1)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.viota.mask.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i1>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i16> @intrinsic_viota_mask_m_nxv1i16_nxv1i1(<vscale x 1 x i16> %0, <vscale x 1 x i1> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_viota_mask_m_nxv1i16_nxv1i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    viota.m v16, v0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.viota.mask.nxv1i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i1> %1,
+    <vscale x 1 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.viota.nxv2i16(
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i16> @intrinsic_viota_m_nxv2i16_nxv2i1(<vscale x 2 x i1> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_viota_m_nxv2i16_nxv2i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    viota.m v16, v0
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i16> @llvm.riscv.viota.nxv2i16(
+    <vscale x 2 x i1> %0,
+    i64 %1)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.viota.mask.nxv2i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i1>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i16> @intrinsic_viota_mask_m_nxv2i16_nxv2i1(<vscale x 2 x i16> %0, <vscale x 2 x i1> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_viota_mask_m_nxv2i16_nxv2i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    viota.m v16, v0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i16> @llvm.riscv.viota.mask.nxv2i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i1> %1,
+    <vscale x 2 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.viota.nxv4i16(
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i16> @intrinsic_viota_m_nxv4i16_nxv4i1(<vscale x 4 x i1> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_viota_m_nxv4i16_nxv4i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    viota.m v16, v0
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i16> @llvm.riscv.viota.nxv4i16(
+    <vscale x 4 x i1> %0,
+    i64 %1)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.viota.mask.nxv4i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i1>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i16> @intrinsic_viota_mask_m_nxv4i16_nxv4i1(<vscale x 4 x i16> %0, <vscale x 4 x i1> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_viota_mask_m_nxv4i16_nxv4i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    viota.m v16, v0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i16> @llvm.riscv.viota.mask.nxv4i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i1> %1,
+    <vscale x 4 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.viota.nxv8i16(
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i16> @intrinsic_viota_m_nxv8i16_nxv8i1(<vscale x 8 x i1> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_viota_m_nxv8i16_nxv8i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    viota.m v16, v0
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i16> @llvm.riscv.viota.nxv8i16(
+    <vscale x 8 x i1> %0,
+    i64 %1)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.viota.mask.nxv8i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i1>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i16> @intrinsic_viota_mask_m_nxv8i16_nxv8i1(<vscale x 8 x i16> %0, <vscale x 8 x i1> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_viota_mask_m_nxv8i16_nxv8i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    viota.m v16, v0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i16> @llvm.riscv.viota.mask.nxv8i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i1> %1,
+    <vscale x 8 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.viota.nxv16i16(
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i16> @intrinsic_viota_m_nxv16i16_nxv16i1(<vscale x 16 x i1> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_viota_m_nxv16i16_nxv16i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
+; CHECK-NEXT:    viota.m v16, v0
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x i16> @llvm.riscv.viota.nxv16i16(
+    <vscale x 16 x i1> %0,
+    i64 %1)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.viota.mask.nxv16i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i1>,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i16> @intrinsic_viota_mask_m_nxv16i16_nxv16i1(<vscale x 16 x i16> %0, <vscale x 16 x i1> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_viota_mask_m_nxv16i16_nxv16i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
+; CHECK-NEXT:    viota.m v16, v0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x i16> @llvm.riscv.viota.mask.nxv16i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i1> %1,
+    <vscale x 16 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.viota.nxv32i16(
+  <vscale x 32 x i1>,
+  i64);
+
+define <vscale x 32 x i16> @intrinsic_viota_m_nxv32i16_nxv32i1(<vscale x 32 x i1> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_viota_m_nxv32i16_nxv32i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
+; CHECK-NEXT:    viota.m v16, v0
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 32 x i16> @llvm.riscv.viota.nxv32i16(
+    <vscale x 32 x i1> %0,
+    i64 %1)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.viota.mask.nxv32i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i1>,
+  <vscale x 32 x i1>,
+  i64);
+
+define <vscale x 32 x i16> @intrinsic_viota_mask_m_nxv32i16_nxv32i1(<vscale x 32 x i16> %0, <vscale x 32 x i1> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_viota_mask_m_nxv32i16_nxv32i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
+; CHECK-NEXT:    viota.m v16, v0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 32 x i16> @llvm.riscv.viota.mask.nxv32i16(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i1> %1,
+    <vscale x 32 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.viota.nxv1i32(
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i32> @intrinsic_viota_m_nxv1i32_nxv1i1(<vscale x 1 x i1> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_viota_m_nxv1i32_nxv1i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    viota.m v16, v0
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.viota.nxv1i32(
+    <vscale x 1 x i1> %0,
+    i64 %1)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.viota.mask.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i1>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i32> @intrinsic_viota_mask_m_nxv1i32_nxv1i1(<vscale x 1 x i32> %0, <vscale x 1 x i1> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_viota_mask_m_nxv1i32_nxv1i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    viota.m v16, v0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.viota.mask.nxv1i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i1> %1,
+    <vscale x 1 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.viota.nxv2i32(
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i32> @intrinsic_viota_m_nxv2i32_nxv2i1(<vscale x 2 x i1> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_viota_m_nxv2i32_nxv2i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    viota.m v16, v0
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.viota.nxv2i32(
+    <vscale x 2 x i1> %0,
+    i64 %1)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.viota.mask.nxv2i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i1>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i32> @intrinsic_viota_mask_m_nxv2i32_nxv2i1(<vscale x 2 x i32> %0, <vscale x 2 x i1> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_viota_mask_m_nxv2i32_nxv2i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    viota.m v16, v0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.viota.mask.nxv2i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i1> %1,
+    <vscale x 2 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.viota.nxv4i32(
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i32> @intrinsic_viota_m_nxv4i32_nxv4i1(<vscale x 4 x i1> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_viota_m_nxv4i32_nxv4i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    viota.m v16, v0
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.viota.nxv4i32(
+    <vscale x 4 x i1> %0,
+    i64 %1)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.viota.mask.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i1>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i32> @intrinsic_viota_mask_m_nxv4i32_nxv4i1(<vscale x 4 x i32> %0, <vscale x 4 x i1> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_viota_mask_m_nxv4i32_nxv4i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    viota.m v16, v0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.viota.mask.nxv4i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i1> %1,
+    <vscale x 4 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.viota.nxv8i32(
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i32> @intrinsic_viota_m_nxv8i32_nxv8i1(<vscale x 8 x i1> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_viota_m_nxv8i32_nxv8i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
+; CHECK-NEXT:    viota.m v16, v0
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.viota.nxv8i32(
+    <vscale x 8 x i1> %0,
+    i64 %1)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.viota.mask.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i1>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i32> @intrinsic_viota_mask_m_nxv8i32_nxv8i1(<vscale x 8 x i32> %0, <vscale x 8 x i1> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_viota_mask_m_nxv8i32_nxv8i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
+; CHECK-NEXT:    viota.m v16, v0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.viota.mask.nxv8i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i1> %1,
+    <vscale x 8 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.viota.nxv16i32(
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i32> @intrinsic_viota_m_nxv16i32_nxv16i1(<vscale x 16 x i1> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_viota_m_nxv16i32_nxv16i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
+; CHECK-NEXT:    viota.m v16, v0
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.viota.nxv16i32(
+    <vscale x 16 x i1> %0,
+    i64 %1)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.viota.mask.nxv16i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i1>,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i32> @intrinsic_viota_mask_m_nxv16i32_nxv16i1(<vscale x 16 x i32> %0, <vscale x 16 x i1> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_viota_mask_m_nxv16i32_nxv16i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
+; CHECK-NEXT:    viota.m v16, v0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.viota.mask.nxv16i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i1> %1,
+    <vscale x 16 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.viota.nxv1i64(
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i64> @intrinsic_viota_m_nxv1i64_nxv1i1(<vscale x 1 x i1> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_viota_m_nxv1i64_nxv1i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT:    viota.m v16, v0
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.viota.nxv1i64(
+    <vscale x 1 x i1> %0,
+    i64 %1)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.viota.mask.nxv1i64(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i1>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i64> @intrinsic_viota_mask_m_nxv1i64_nxv1i1(<vscale x 1 x i64> %0, <vscale x 1 x i1> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_viota_mask_m_nxv1i64_nxv1i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT:    viota.m v16, v0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.viota.mask.nxv1i64(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i1> %1,
+    <vscale x 1 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.viota.nxv2i64(
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i64> @intrinsic_viota_m_nxv2i64_nxv2i1(<vscale x 2 x i1> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_viota_m_nxv2i64_nxv2i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT:    viota.m v16, v0
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i64> @llvm.riscv.viota.nxv2i64(
+    <vscale x 2 x i1> %0,
+    i64 %1)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.viota.mask.nxv2i64(
+  <vscale x 2 x i64>,
+  <vscale x 2 x i1>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i64> @intrinsic_viota_mask_m_nxv2i64_nxv2i1(<vscale x 2 x i64> %0, <vscale x 2 x i1> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_viota_mask_m_nxv2i64_nxv2i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT:    viota.m v16, v0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i64> @llvm.riscv.viota.mask.nxv2i64(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i1> %1,
+    <vscale x 2 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.viota.nxv4i64(
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i64> @intrinsic_viota_m_nxv4i64_nxv4i1(<vscale x 4 x i1> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_viota_m_nxv4i64_nxv4i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
+; CHECK-NEXT:    viota.m v16, v0
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i64> @llvm.riscv.viota.nxv4i64(
+    <vscale x 4 x i1> %0,
+    i64 %1)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.viota.mask.nxv4i64(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i1>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i64> @intrinsic_viota_mask_m_nxv4i64_nxv4i1(<vscale x 4 x i64> %0, <vscale x 4 x i1> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_viota_mask_m_nxv4i64_nxv4i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
+; CHECK-NEXT:    viota.m v16, v0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i64> @llvm.riscv.viota.mask.nxv4i64(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i1> %1,
+    <vscale x 4 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.viota.nxv8i64(
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i64> @intrinsic_viota_m_nxv8i64_nxv8i1(<vscale x 8 x i1> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_viota_m_nxv8i64_nxv8i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
+; CHECK-NEXT:    viota.m v16, v0
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.viota.nxv8i64(
+    <vscale x 8 x i1> %0,
+    i64 %1)
+
+  ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.viota.mask.nxv8i64(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i1>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i64> @intrinsic_viota_mask_m_nxv8i64_nxv8i1(<vscale x 8 x i64> %0, <vscale x 8 x i1> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_viota_mask_m_nxv8i64_nxv8i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
+; CHECK-NEXT:    viota.m v16, v0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.viota.mask.nxv8i64(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i1> %1,
+    <vscale x 8 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 8 x i64> %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsbf-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsbf-rv32.ll
new file mode 100644
index 000000000000..b0ee5ab3a27f
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsbf-rv32.ll
@@ -0,0 +1,239 @@
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i1> @llvm.riscv.vmsbf.nxv1i1(
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i1> @intrinsic_vmsbf_m_nxv1i1(<vscale x 1 x i1> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbf_m_nxv1i1
+; CHECK:       vsetvli {{.*}}, a0, e8,mf8,ta,mu
+; CHECK:       vmsbf.m {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbf.nxv1i1(
+    <vscale x 1 x i1> %0,
+    i32 %1)
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsbf.mask.nxv1i1(
+  <vscale x 1 x i1>,
+  <vscale x 1 x i1>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i1> @intrinsic_vmsbf_mask_m_nxv1i1_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv1i1
+; CHECK:       vsetvli {{.*}}, a0, e8,mf8,ta,mu
+; CHECK:       vmsbf.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbf.mask.nxv1i1(
+    <vscale x 1 x i1> %0,
+    <vscale x 1 x i1> %1,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsbf.nxv2i1(
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i1> @intrinsic_vmsbf_m_nxv2i1(<vscale x 2 x i1> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbf_m_nxv2i1
+; CHECK:       vsetvli {{.*}}, a0, e8,mf4,ta,mu
+; CHECK:       vmsbf.m {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbf.nxv2i1(
+    <vscale x 2 x i1> %0,
+    i32 %1)
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsbf.mask.nxv2i1(
+  <vscale x 2 x i1>,
+  <vscale x 2 x i1>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i1> @intrinsic_vmsbf_mask_m_nxv2i1_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv2i1
+; CHECK:       vsetvli {{.*}}, a0, e8,mf4,ta,mu
+; CHECK:       vmsbf.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbf.mask.nxv2i1(
+    <vscale x 2 x i1> %0,
+    <vscale x 2 x i1> %1,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsbf.nxv4i1(
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i1> @intrinsic_vmsbf_m_nxv4i1(<vscale x 4 x i1> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbf_m_nxv4i1
+; CHECK:       vsetvli {{.*}}, a0, e8,mf2,ta,mu
+; CHECK:       vmsbf.m {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbf.nxv4i1(
+    <vscale x 4 x i1> %0,
+    i32 %1)
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsbf.mask.nxv4i1(
+  <vscale x 4 x i1>,
+  <vscale x 4 x i1>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i1> @intrinsic_vmsbf_mask_m_nxv4i1_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv4i1
+; CHECK:       vsetvli {{.*}}, a0, e8,mf2,ta,mu
+; CHECK:       vmsbf.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbf.mask.nxv4i1(
+    <vscale x 4 x i1> %0,
+    <vscale x 4 x i1> %1,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsbf.nxv8i1(
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i1> @intrinsic_vmsbf_m_nxv8i1(<vscale x 8 x i1> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbf_m_nxv8i1
+; CHECK:       vsetvli {{.*}}, a0, e8,m1,ta,mu
+; CHECK:       vmsbf.m {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbf.nxv8i1(
+    <vscale x 8 x i1> %0,
+    i32 %1)
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsbf.mask.nxv8i1(
+  <vscale x 8 x i1>,
+  <vscale x 8 x i1>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i1> @intrinsic_vmsbf_mask_m_nxv8i1_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv8i1
+; CHECK:       vsetvli {{.*}}, a0, e8,m1,ta,mu
+; CHECK:       vmsbf.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbf.mask.nxv8i1(
+    <vscale x 8 x i1> %0,
+    <vscale x 8 x i1> %1,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsbf.nxv16i1(
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i1> @intrinsic_vmsbf_m_nxv16i1(<vscale x 16 x i1> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbf_m_nxv16i1
+; CHECK:       vsetvli {{.*}}, a0, e8,m2,ta,mu
+; CHECK:       vmsbf.m {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbf.nxv16i1(
+    <vscale x 16 x i1> %0,
+    i32 %1)
+  ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsbf.mask.nxv16i1(
+  <vscale x 16 x i1>,
+  <vscale x 16 x i1>,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i1> @intrinsic_vmsbf_mask_m_nxv16i1_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv16i1
+; CHECK:       vsetvli {{.*}}, a0, e8,m2,ta,mu
+; CHECK:       vmsbf.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbf.mask.nxv16i1(
+    <vscale x 16 x i1> %0,
+    <vscale x 16 x i1> %1,
+    <vscale x 16 x i1> %2,
+    i32 %3)
+  ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmsbf.nxv32i1(
+  <vscale x 32 x i1>,
+  i32);
+
+define <vscale x 32 x i1> @intrinsic_vmsbf_m_nxv32i1(<vscale x 32 x i1> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbf_m_nxv32i1
+; CHECK:       vsetvli {{.*}}, a0, e8,m4,ta,mu
+; CHECK:       vmsbf.m {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmsbf.nxv32i1(
+    <vscale x 32 x i1> %0,
+    i32 %1)
+  ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmsbf.mask.nxv32i1(
+  <vscale x 32 x i1>,
+  <vscale x 32 x i1>,
+  <vscale x 32 x i1>,
+  i32);
+
+define <vscale x 32 x i1> @intrinsic_vmsbf_mask_m_nxv32i1_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv32i1
+; CHECK:       vsetvli {{.*}}, a0, e8,m4,ta,mu
+; CHECK:       vmsbf.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmsbf.mask.nxv32i1(
+    <vscale x 32 x i1> %0,
+    <vscale x 32 x i1> %1,
+    <vscale x 32 x i1> %2,
+    i32 %3)
+  ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 64 x i1> @llvm.riscv.vmsbf.nxv64i1(
+  <vscale x 64 x i1>,
+  i32);
+
+define <vscale x 64 x i1> @intrinsic_vmsbf_m_nxv64i1(<vscale x 64 x i1> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbf_m_nxv64i1
+; CHECK:       vsetvli {{.*}}, a0, e8,m8,ta,mu
+; CHECK:       vmsbf.m {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 64 x i1> @llvm.riscv.vmsbf.nxv64i1(
+    <vscale x 64 x i1> %0,
+    i32 %1)
+  ret <vscale x 64 x i1> %a
+}
+
+declare <vscale x 64 x i1> @llvm.riscv.vmsbf.mask.nxv64i1(
+  <vscale x 64 x i1>,
+  <vscale x 64 x i1>,
+  <vscale x 64 x i1>,
+  i32);
+
+define <vscale x 64 x i1> @intrinsic_vmsbf_mask_m_nxv64i1_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, <vscale x 64 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv64i1
+; CHECK:       vsetvli {{.*}}, a0, e8,m8,ta,mu
+; CHECK:       vmsbf.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 64 x i1> @llvm.riscv.vmsbf.mask.nxv64i1(
+    <vscale x 64 x i1> %0,
+    <vscale x 64 x i1> %1,
+    <vscale x 64 x i1> %2,
+    i32 %3)
+  ret <vscale x 64 x i1> %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsbf-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsbf-rv64.ll
new file mode 100644
index 000000000000..3dce4a537523
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsbf-rv64.ll
@@ -0,0 +1,239 @@
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i1> @llvm.riscv.vmsbf.nxv1i1(
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsbf_m_nxv1i1(<vscale x 1 x i1> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbf_m_nxv1i1
+; CHECK:       vsetvli {{.*}}, a0, e8,mf8,ta,mu
+; CHECK:       vmsbf.m {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbf.nxv1i1(
+    <vscale x 1 x i1> %0,
+    i64 %1)
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsbf.mask.nxv1i1(
+  <vscale x 1 x i1>,
+  <vscale x 1 x i1>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsbf_mask_m_nxv1i1_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv1i1
+; CHECK:       vsetvli {{.*}}, a0, e8,mf8,ta,mu
+; CHECK:       vmsbf.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbf.mask.nxv1i1(
+    <vscale x 1 x i1> %0,
+    <vscale x 1 x i1> %1,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsbf.nxv2i1(
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsbf_m_nxv2i1(<vscale x 2 x i1> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbf_m_nxv2i1
+; CHECK:       vsetvli {{.*}}, a0, e8,mf4,ta,mu
+; CHECK:       vmsbf.m {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbf.nxv2i1(
+    <vscale x 2 x i1> %0,
+    i64 %1)
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsbf.mask.nxv2i1(
+  <vscale x 2 x i1>,
+  <vscale x 2 x i1>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsbf_mask_m_nxv2i1_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv2i1
+; CHECK:       vsetvli {{.*}}, a0, e8,mf4,ta,mu
+; CHECK:       vmsbf.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbf.mask.nxv2i1(
+    <vscale x 2 x i1> %0,
+    <vscale x 2 x i1> %1,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsbf.nxv4i1(
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsbf_m_nxv4i1(<vscale x 4 x i1> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbf_m_nxv4i1
+; CHECK:       vsetvli {{.*}}, a0, e8,mf2,ta,mu
+; CHECK:       vmsbf.m {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbf.nxv4i1(
+    <vscale x 4 x i1> %0,
+    i64 %1)
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsbf.mask.nxv4i1(
+  <vscale x 4 x i1>,
+  <vscale x 4 x i1>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsbf_mask_m_nxv4i1_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv4i1
+; CHECK:       vsetvli {{.*}}, a0, e8,mf2,ta,mu
+; CHECK:       vmsbf.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbf.mask.nxv4i1(
+    <vscale x 4 x i1> %0,
+    <vscale x 4 x i1> %1,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsbf.nxv8i1(
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i1> @intrinsic_vmsbf_m_nxv8i1(<vscale x 8 x i1> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbf_m_nxv8i1
+; CHECK:       vsetvli {{.*}}, a0, e8,m1,ta,mu
+; CHECK:       vmsbf.m {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbf.nxv8i1(
+    <vscale x 8 x i1> %0,
+    i64 %1)
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsbf.mask.nxv8i1(
+  <vscale x 8 x i1>,
+  <vscale x 8 x i1>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i1> @intrinsic_vmsbf_mask_m_nxv8i1_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv8i1
+; CHECK:       vsetvli {{.*}}, a0, e8,m1,ta,mu
+; CHECK:       vmsbf.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbf.mask.nxv8i1(
+    <vscale x 8 x i1> %0,
+    <vscale x 8 x i1> %1,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsbf.nxv16i1(
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i1> @intrinsic_vmsbf_m_nxv16i1(<vscale x 16 x i1> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbf_m_nxv16i1
+; CHECK:       vsetvli {{.*}}, a0, e8,m2,ta,mu
+; CHECK:       vmsbf.m {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbf.nxv16i1(
+    <vscale x 16 x i1> %0,
+    i64 %1)
+  ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsbf.mask.nxv16i1(
+  <vscale x 16 x i1>,
+  <vscale x 16 x i1>,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i1> @intrinsic_vmsbf_mask_m_nxv16i1_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv16i1
+; CHECK:       vsetvli {{.*}}, a0, e8,m2,ta,mu
+; CHECK:       vmsbf.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbf.mask.nxv16i1(
+    <vscale x 16 x i1> %0,
+    <vscale x 16 x i1> %1,
+    <vscale x 16 x i1> %2,
+    i64 %3)
+  ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmsbf.nxv32i1(
+  <vscale x 32 x i1>,
+  i64);
+
+define <vscale x 32 x i1> @intrinsic_vmsbf_m_nxv32i1(<vscale x 32 x i1> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbf_m_nxv32i1
+; CHECK:       vsetvli {{.*}}, a0, e8,m4,ta,mu
+; CHECK:       vmsbf.m {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmsbf.nxv32i1(
+    <vscale x 32 x i1> %0,
+    i64 %1)
+  ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmsbf.mask.nxv32i1(
+  <vscale x 32 x i1>,
+  <vscale x 32 x i1>,
+  <vscale x 32 x i1>,
+  i64);
+
+define <vscale x 32 x i1> @intrinsic_vmsbf_mask_m_nxv32i1_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv32i1
+; CHECK:       vsetvli {{.*}}, a0, e8,m4,ta,mu
+; CHECK:       vmsbf.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmsbf.mask.nxv32i1(
+    <vscale x 32 x i1> %0,
+    <vscale x 32 x i1> %1,
+    <vscale x 32 x i1> %2,
+    i64 %3)
+  ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 64 x i1> @llvm.riscv.vmsbf.nxv64i1(
+  <vscale x 64 x i1>,
+  i64);
+
+define <vscale x 64 x i1> @intrinsic_vmsbf_m_nxv64i1(<vscale x 64 x i1> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbf_m_nxv64i1
+; CHECK:       vsetvli {{.*}}, a0, e8,m8,ta,mu
+; CHECK:       vmsbf.m {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 64 x i1> @llvm.riscv.vmsbf.nxv64i1(
+    <vscale x 64 x i1> %0,
+    i64 %1)
+  ret <vscale x 64 x i1> %a
+}
+
+declare <vscale x 64 x i1> @llvm.riscv.vmsbf.mask.nxv64i1(
+  <vscale x 64 x i1>,
+  <vscale x 64 x i1>,
+  <vscale x 64 x i1>,
+  i64);
+
+define <vscale x 64 x i1> @intrinsic_vmsbf_mask_m_nxv64i1_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, <vscale x 64 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv64i1
+; CHECK:       vsetvli {{.*}}, a0, e8,m8,ta,mu
+; CHECK:       vmsbf.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 64 x i1> @llvm.riscv.vmsbf.mask.nxv64i1(
+    <vscale x 64 x i1> %0,
+    <vscale x 64 x i1> %1,
+    <vscale x 64 x i1> %2,
+    i64 %3)
+  ret <vscale x 64 x i1> %a
+}

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vmsif-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsif-rv32.ll
new file mode 100644
index 000000000000..97fca5b2bc4a
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsif-rv32.ll
@@ -0,0 +1,239 @@
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i1> @llvm.riscv.vmsif.nxv1i1(
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i1> @intrinsic_vmsif_m_nxv1i1(<vscale x 1 x i1> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsif_m_nxv1i1
+; CHECK:       vsetvli {{.*}}, a0, e8,mf8,ta,mu
+; CHECK:       vmsif.m {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsif.nxv1i1(
+    <vscale x 1 x i1> %0,
+    i32 %1)
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsif.mask.nxv1i1(
+  <vscale x 1 x i1>,
+  <vscale x 1 x i1>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i1> @intrinsic_vmsif_mask_m_nxv1i1_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv1i1
+; CHECK:       vsetvli {{.*}}, a0, e8,mf8,ta,mu
+; CHECK:       vmsif.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsif.mask.nxv1i1(
+    <vscale x 1 x i1> %0,
+    <vscale x 1 x i1> %1,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsif.nxv2i1(
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i1> @intrinsic_vmsif_m_nxv2i1(<vscale x 2 x i1> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsif_m_nxv2i1
+; CHECK:       vsetvli {{.*}}, a0, e8,mf4,ta,mu
+; CHECK:       vmsif.m {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsif.nxv2i1(
+    <vscale x 2 x i1> %0,
+    i32 %1)
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsif.mask.nxv2i1(
+  <vscale x 2 x i1>,
+  <vscale x 2 x i1>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i1> @intrinsic_vmsif_mask_m_nxv2i1_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv2i1
+; CHECK:       vsetvli {{.*}}, a0, e8,mf4,ta,mu
+; CHECK:       vmsif.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsif.mask.nxv2i1(
+    <vscale x 2 x i1> %0,
+    <vscale x 2 x i1> %1,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsif.nxv4i1(
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i1> @intrinsic_vmsif_m_nxv4i1(<vscale x 4 x i1> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsif_m_nxv4i1
+; CHECK:       vsetvli {{.*}}, a0, e8,mf2,ta,mu
+; CHECK:       vmsif.m {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsif.nxv4i1(
+    <vscale x 4 x i1> %0,
+    i32 %1)
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsif.mask.nxv4i1(
+  <vscale x 4 x i1>,
+  <vscale x 4 x i1>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i1> @intrinsic_vmsif_mask_m_nxv4i1_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv4i1
+; CHECK:       vsetvli {{.*}}, a0, e8,mf2,ta,mu
+; CHECK:       vmsif.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsif.mask.nxv4i1(
+    <vscale x 4 x i1> %0,
+    <vscale x 4 x i1> %1,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsif.nxv8i1(
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i1> @intrinsic_vmsif_m_nxv8i1(<vscale x 8 x i1> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsif_m_nxv8i1
+; CHECK:       vsetvli {{.*}}, a0, e8,m1,ta,mu
+; CHECK:       vmsif.m {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsif.nxv8i1(
+    <vscale x 8 x i1> %0,
+    i32 %1)
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsif.mask.nxv8i1(
+  <vscale x 8 x i1>,
+  <vscale x 8 x i1>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i1> @intrinsic_vmsif_mask_m_nxv8i1_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv8i1
+; CHECK:       vsetvli {{.*}}, a0, e8,m1,ta,mu
+; CHECK:       vmsif.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsif.mask.nxv8i1(
+    <vscale x 8 x i1> %0,
+    <vscale x 8 x i1> %1,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsif.nxv16i1(
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i1> @intrinsic_vmsif_m_nxv16i1(<vscale x 16 x i1> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsif_m_nxv16i1
+; CHECK:       vsetvli {{.*}}, a0, e8,m2,ta,mu
+; CHECK:       vmsif.m {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmsif.nxv16i1(
+    <vscale x 16 x i1> %0,
+    i32 %1)
+  ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsif.mask.nxv16i1(
+  <vscale x 16 x i1>,
+  <vscale x 16 x i1>,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i1> @intrinsic_vmsif_mask_m_nxv16i1_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv16i1
+; CHECK:       vsetvli {{.*}}, a0, e8,m2,ta,mu
+; CHECK:       vmsif.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmsif.mask.nxv16i1(
+    <vscale x 16 x i1> %0,
+    <vscale x 16 x i1> %1,
+    <vscale x 16 x i1> %2,
+    i32 %3)
+  ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmsif.nxv32i1(
+  <vscale x 32 x i1>,
+  i32);
+
+define <vscale x 32 x i1> @intrinsic_vmsif_m_nxv32i1(<vscale x 32 x i1> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsif_m_nxv32i1
+; CHECK:       vsetvli {{.*}}, a0, e8,m4,ta,mu
+; CHECK:       vmsif.m {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmsif.nxv32i1(
+    <vscale x 32 x i1> %0,
+    i32 %1)
+  ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmsif.mask.nxv32i1(
+  <vscale x 32 x i1>,
+  <vscale x 32 x i1>,
+  <vscale x 32 x i1>,
+  i32);
+
+define <vscale x 32 x i1> @intrinsic_vmsif_mask_m_nxv32i1_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv32i1
+; CHECK:       vsetvli {{.*}}, a0, e8,m4,ta,mu
+; CHECK:       vmsif.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmsif.mask.nxv32i1(
+    <vscale x 32 x i1> %0,
+    <vscale x 32 x i1> %1,
+    <vscale x 32 x i1> %2,
+    i32 %3)
+  ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 64 x i1> @llvm.riscv.vmsif.nxv64i1(
+  <vscale x 64 x i1>,
+  i32);
+
+define <vscale x 64 x i1> @intrinsic_vmsif_m_nxv64i1(<vscale x 64 x i1> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsif_m_nxv64i1
+; CHECK:       vsetvli {{.*}}, a0, e8,m8,ta,mu
+; CHECK:       vmsif.m {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 64 x i1> @llvm.riscv.vmsif.nxv64i1(
+    <vscale x 64 x i1> %0,
+    i32 %1)
+  ret <vscale x 64 x i1> %a
+}
+
+declare <vscale x 64 x i1> @llvm.riscv.vmsif.mask.nxv64i1(
+  <vscale x 64 x i1>,
+  <vscale x 64 x i1>,
+  <vscale x 64 x i1>,
+  i32);
+
+define <vscale x 64 x i1> @intrinsic_vmsif_mask_m_nxv64i1_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, <vscale x 64 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv64i1
+; CHECK:       vsetvli {{.*}}, a0, e8,m8,ta,mu
+; CHECK:       vmsif.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 64 x i1> @llvm.riscv.vmsif.mask.nxv64i1(
+    <vscale x 64 x i1> %0,
+    <vscale x 64 x i1> %1,
+    <vscale x 64 x i1> %2,
+    i32 %3)
+  ret <vscale x 64 x i1> %a
+}

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vmsif-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsif-rv64.ll
new file mode 100644
index 000000000000..280509a63fe4
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsif-rv64.ll
@@ -0,0 +1,239 @@
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i1> @llvm.riscv.vmsif.nxv1i1(
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsif_m_nxv1i1(<vscale x 1 x i1> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsif_m_nxv1i1
+; CHECK:       vsetvli {{.*}}, a0, e8,mf8,ta,mu
+; CHECK:       vmsif.m {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsif.nxv1i1(
+    <vscale x 1 x i1> %0,
+    i64 %1)
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsif.mask.nxv1i1(
+  <vscale x 1 x i1>,
+  <vscale x 1 x i1>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsif_mask_m_nxv1i1_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv1i1
+; CHECK:       vsetvli {{.*}}, a0, e8,mf8,ta,mu
+; CHECK:       vmsif.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsif.mask.nxv1i1(
+    <vscale x 1 x i1> %0,
+    <vscale x 1 x i1> %1,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsif.nxv2i1(
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsif_m_nxv2i1(<vscale x 2 x i1> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsif_m_nxv2i1
+; CHECK:       vsetvli {{.*}}, a0, e8,mf4,ta,mu
+; CHECK:       vmsif.m {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsif.nxv2i1(
+    <vscale x 2 x i1> %0,
+    i64 %1)
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsif.mask.nxv2i1(
+  <vscale x 2 x i1>,
+  <vscale x 2 x i1>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsif_mask_m_nxv2i1_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv2i1
+; CHECK:       vsetvli {{.*}}, a0, e8,mf4,ta,mu
+; CHECK:       vmsif.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsif.mask.nxv2i1(
+    <vscale x 2 x i1> %0,
+    <vscale x 2 x i1> %1,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsif.nxv4i1(
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsif_m_nxv4i1(<vscale x 4 x i1> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsif_m_nxv4i1
+; CHECK:       vsetvli {{.*}}, a0, e8,mf2,ta,mu
+; CHECK:       vmsif.m {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsif.nxv4i1(
+    <vscale x 4 x i1> %0,
+    i64 %1)
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsif.mask.nxv4i1(
+  <vscale x 4 x i1>,
+  <vscale x 4 x i1>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsif_mask_m_nxv4i1_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv4i1
+; CHECK:       vsetvli {{.*}}, a0, e8,mf2,ta,mu
+; CHECK:       vmsif.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsif.mask.nxv4i1(
+    <vscale x 4 x i1> %0,
+    <vscale x 4 x i1> %1,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsif.nxv8i1(
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i1> @intrinsic_vmsif_m_nxv8i1(<vscale x 8 x i1> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsif_m_nxv8i1
+; CHECK:       vsetvli {{.*}}, a0, e8,m1,ta,mu
+; CHECK:       vmsif.m {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsif.nxv8i1(
+    <vscale x 8 x i1> %0,
+    i64 %1)
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsif.mask.nxv8i1(
+  <vscale x 8 x i1>,
+  <vscale x 8 x i1>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i1> @intrinsic_vmsif_mask_m_nxv8i1_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv8i1
+; CHECK:       vsetvli {{.*}}, a0, e8,m1,ta,mu
+; CHECK:       vmsif.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsif.mask.nxv8i1(
+    <vscale x 8 x i1> %0,
+    <vscale x 8 x i1> %1,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsif.nxv16i1(
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i1> @intrinsic_vmsif_m_nxv16i1(<vscale x 16 x i1> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsif_m_nxv16i1
+; CHECK:       vsetvli {{.*}}, a0, e8,m2,ta,mu
+; CHECK:       vmsif.m {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmsif.nxv16i1(
+    <vscale x 16 x i1> %0,
+    i64 %1)
+  ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsif.mask.nxv16i1(
+  <vscale x 16 x i1>,
+  <vscale x 16 x i1>,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i1> @intrinsic_vmsif_mask_m_nxv16i1_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv16i1
+; CHECK:       vsetvli {{.*}}, a0, e8,m2,ta,mu
+; CHECK:       vmsif.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmsif.mask.nxv16i1(
+    <vscale x 16 x i1> %0,
+    <vscale x 16 x i1> %1,
+    <vscale x 16 x i1> %2,
+    i64 %3)
+  ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmsif.nxv32i1(
+  <vscale x 32 x i1>,
+  i64);
+
+define <vscale x 32 x i1> @intrinsic_vmsif_m_nxv32i1(<vscale x 32 x i1> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsif_m_nxv32i1
+; CHECK:       vsetvli {{.*}}, a0, e8,m4,ta,mu
+; CHECK:       vmsif.m {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmsif.nxv32i1(
+    <vscale x 32 x i1> %0,
+    i64 %1)
+  ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmsif.mask.nxv32i1(
+  <vscale x 32 x i1>,
+  <vscale x 32 x i1>,
+  <vscale x 32 x i1>,
+  i64);
+
+define <vscale x 32 x i1> @intrinsic_vmsif_mask_m_nxv32i1_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv32i1
+; CHECK:       vsetvli {{.*}}, a0, e8,m4,ta,mu
+; CHECK:       vmsif.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmsif.mask.nxv32i1(
+    <vscale x 32 x i1> %0,
+    <vscale x 32 x i1> %1,
+    <vscale x 32 x i1> %2,
+    i64 %3)
+  ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 64 x i1> @llvm.riscv.vmsif.nxv64i1(
+  <vscale x 64 x i1>,
+  i64);
+
+define <vscale x 64 x i1> @intrinsic_vmsif_m_nxv64i1(<vscale x 64 x i1> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsif_m_nxv64i1
+; CHECK:       vsetvli {{.*}}, a0, e8,m8,ta,mu
+; CHECK:       vmsif.m {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 64 x i1> @llvm.riscv.vmsif.nxv64i1(
+    <vscale x 64 x i1> %0,
+    i64 %1)
+  ret <vscale x 64 x i1> %a
+}
+
+declare <vscale x 64 x i1> @llvm.riscv.vmsif.mask.nxv64i1(
+  <vscale x 64 x i1>,
+  <vscale x 64 x i1>,
+  <vscale x 64 x i1>,
+  i64);
+
+define <vscale x 64 x i1> @intrinsic_vmsif_mask_m_nxv64i1_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, <vscale x 64 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv64i1
+; CHECK:       vsetvli {{.*}}, a0, e8,m8,ta,mu
+; CHECK:       vmsif.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 64 x i1> @llvm.riscv.vmsif.mask.nxv64i1(
+    <vscale x 64 x i1> %0,
+    <vscale x 64 x i1> %1,
+    <vscale x 64 x i1> %2,
+    i64 %3)
+  ret <vscale x 64 x i1> %a
+}

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vmsof-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsof-rv32.ll
new file mode 100644
index 000000000000..8fa635bf3fe1
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsof-rv32.ll
@@ -0,0 +1,239 @@
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i1> @llvm.riscv.vmsof.nxv1i1(
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i1> @intrinsic_vmsof_m_nxv1i1(<vscale x 1 x i1> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsof_m_nxv1i1
+; CHECK:       vsetvli {{.*}}, a0, e8,mf8,ta,mu
+; CHECK:       vmsof.m {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsof.nxv1i1(
+    <vscale x 1 x i1> %0,
+    i32 %1)
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsof.mask.nxv1i1(
+  <vscale x 1 x i1>,
+  <vscale x 1 x i1>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i1> @intrinsic_vmsof_mask_m_nxv1i1_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv1i1
+; CHECK:       vsetvli {{.*}}, a0, e8,mf8,ta,mu
+; CHECK:       vmsof.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsof.mask.nxv1i1(
+    <vscale x 1 x i1> %0,
+    <vscale x 1 x i1> %1,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsof.nxv2i1(
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i1> @intrinsic_vmsof_m_nxv2i1(<vscale x 2 x i1> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsof_m_nxv2i1
+; CHECK:       vsetvli {{.*}}, a0, e8,mf4,ta,mu
+; CHECK:       vmsof.m {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsof.nxv2i1(
+    <vscale x 2 x i1> %0,
+    i32 %1)
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsof.mask.nxv2i1(
+  <vscale x 2 x i1>,
+  <vscale x 2 x i1>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i1> @intrinsic_vmsof_mask_m_nxv2i1_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv2i1
+; CHECK:       vsetvli {{.*}}, a0, e8,mf4,ta,mu
+; CHECK:       vmsof.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsof.mask.nxv2i1(
+    <vscale x 2 x i1> %0,
+    <vscale x 2 x i1> %1,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsof.nxv4i1(
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i1> @intrinsic_vmsof_m_nxv4i1(<vscale x 4 x i1> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsof_m_nxv4i1
+; CHECK:       vsetvli {{.*}}, a0, e8,mf2,ta,mu
+; CHECK:       vmsof.m {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsof.nxv4i1(
+    <vscale x 4 x i1> %0,
+    i32 %1)
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsof.mask.nxv4i1(
+  <vscale x 4 x i1>,
+  <vscale x 4 x i1>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i1> @intrinsic_vmsof_mask_m_nxv4i1_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv4i1
+; CHECK:       vsetvli {{.*}}, a0, e8,mf2,ta,mu
+; CHECK:       vmsof.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsof.mask.nxv4i1(
+    <vscale x 4 x i1> %0,
+    <vscale x 4 x i1> %1,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsof.nxv8i1(
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i1> @intrinsic_vmsof_m_nxv8i1(<vscale x 8 x i1> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsof_m_nxv8i1
+; CHECK:       vsetvli {{.*}}, a0, e8,m1,ta,mu
+; CHECK:       vmsof.m {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsof.nxv8i1(
+    <vscale x 8 x i1> %0,
+    i32 %1)
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsof.mask.nxv8i1(
+  <vscale x 8 x i1>,
+  <vscale x 8 x i1>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i1> @intrinsic_vmsof_mask_m_nxv8i1_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv8i1
+; CHECK:       vsetvli {{.*}}, a0, e8,m1,ta,mu
+; CHECK:       vmsof.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsof.mask.nxv8i1(
+    <vscale x 8 x i1> %0,
+    <vscale x 8 x i1> %1,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsof.nxv16i1(
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i1> @intrinsic_vmsof_m_nxv16i1(<vscale x 16 x i1> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsof_m_nxv16i1
+; CHECK:       vsetvli {{.*}}, a0, e8,m2,ta,mu
+; CHECK:       vmsof.m {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmsof.nxv16i1(
+    <vscale x 16 x i1> %0,
+    i32 %1)
+  ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsof.mask.nxv16i1(
+  <vscale x 16 x i1>,
+  <vscale x 16 x i1>,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i1> @intrinsic_vmsof_mask_m_nxv16i1_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv16i1
+; CHECK:       vsetvli {{.*}}, a0, e8,m2,ta,mu
+; CHECK:       vmsof.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmsof.mask.nxv16i1(
+    <vscale x 16 x i1> %0,
+    <vscale x 16 x i1> %1,
+    <vscale x 16 x i1> %2,
+    i32 %3)
+  ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmsof.nxv32i1(
+  <vscale x 32 x i1>,
+  i32);
+
+define <vscale x 32 x i1> @intrinsic_vmsof_m_nxv32i1(<vscale x 32 x i1> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsof_m_nxv32i1
+; CHECK:       vsetvli {{.*}}, a0, e8,m4,ta,mu
+; CHECK:       vmsof.m {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmsof.nxv32i1(
+    <vscale x 32 x i1> %0,
+    i32 %1)
+  ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmsof.mask.nxv32i1(
+  <vscale x 32 x i1>,
+  <vscale x 32 x i1>,
+  <vscale x 32 x i1>,
+  i32);
+
+define <vscale x 32 x i1> @intrinsic_vmsof_mask_m_nxv32i1_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv32i1
+; CHECK:       vsetvli {{.*}}, a0, e8,m4,ta,mu
+; CHECK:       vmsof.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmsof.mask.nxv32i1(
+    <vscale x 32 x i1> %0,
+    <vscale x 32 x i1> %1,
+    <vscale x 32 x i1> %2,
+    i32 %3)
+  ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 64 x i1> @llvm.riscv.vmsof.nxv64i1(
+  <vscale x 64 x i1>,
+  i32);
+
+define <vscale x 64 x i1> @intrinsic_vmsof_m_nxv64i1(<vscale x 64 x i1> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsof_m_nxv64i1
+; CHECK:       vsetvli {{.*}}, a0, e8,m8,ta,mu
+; CHECK:       vmsof.m {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 64 x i1> @llvm.riscv.vmsof.nxv64i1(
+    <vscale x 64 x i1> %0,
+    i32 %1)
+  ret <vscale x 64 x i1> %a
+}
+
+declare <vscale x 64 x i1> @llvm.riscv.vmsof.mask.nxv64i1(
+  <vscale x 64 x i1>,
+  <vscale x 64 x i1>,
+  <vscale x 64 x i1>,
+  i32);
+
+define <vscale x 64 x i1> @intrinsic_vmsof_mask_m_nxv64i1_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, <vscale x 64 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv64i1
+; CHECK:       vsetvli {{.*}}, a0, e8,m8,ta,mu
+; CHECK:       vmsof.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 64 x i1> @llvm.riscv.vmsof.mask.nxv64i1(
+    <vscale x 64 x i1> %0,
+    <vscale x 64 x i1> %1,
+    <vscale x 64 x i1> %2,
+    i32 %3)
+  ret <vscale x 64 x i1> %a
+}

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vmsof-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsof-rv64.ll
new file mode 100644
index 000000000000..fab86d873002
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsof-rv64.ll
@@ -0,0 +1,239 @@
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i1> @llvm.riscv.vmsof.nxv1i1(
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsof_m_nxv1i1(<vscale x 1 x i1> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsof_m_nxv1i1
+; CHECK:       vsetvli {{.*}}, a0, e8,mf8,ta,mu
+; CHECK:       vmsof.m {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsof.nxv1i1(
+    <vscale x 1 x i1> %0,
+    i64 %1)
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsof.mask.nxv1i1(
+  <vscale x 1 x i1>,
+  <vscale x 1 x i1>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsof_mask_m_nxv1i1_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv1i1
+; CHECK:       vsetvli {{.*}}, a0, e8,mf8,ta,mu
+; CHECK:       vmsof.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsof.mask.nxv1i1(
+    <vscale x 1 x i1> %0,
+    <vscale x 1 x i1> %1,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsof.nxv2i1(
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsof_m_nxv2i1(<vscale x 2 x i1> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsof_m_nxv2i1
+; CHECK:       vsetvli {{.*}}, a0, e8,mf4,ta,mu
+; CHECK:       vmsof.m {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsof.nxv2i1(
+    <vscale x 2 x i1> %0,
+    i64 %1)
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsof.mask.nxv2i1(
+  <vscale x 2 x i1>,
+  <vscale x 2 x i1>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsof_mask_m_nxv2i1_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv2i1
+; CHECK:       vsetvli {{.*}}, a0, e8,mf4,ta,mu
+; CHECK:       vmsof.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsof.mask.nxv2i1(
+    <vscale x 2 x i1> %0,
+    <vscale x 2 x i1> %1,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsof.nxv4i1(
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsof_m_nxv4i1(<vscale x 4 x i1> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsof_m_nxv4i1
+; CHECK:       vsetvli {{.*}}, a0, e8,mf2,ta,mu
+; CHECK:       vmsof.m {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsof.nxv4i1(
+    <vscale x 4 x i1> %0,
+    i64 %1)
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsof.mask.nxv4i1(
+  <vscale x 4 x i1>,
+  <vscale x 4 x i1>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsof_mask_m_nxv4i1_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv4i1
+; CHECK:       vsetvli {{.*}}, a0, e8,mf2,ta,mu
+; CHECK:       vmsof.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsof.mask.nxv4i1(
+    <vscale x 4 x i1> %0,
+    <vscale x 4 x i1> %1,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsof.nxv8i1(
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i1> @intrinsic_vmsof_m_nxv8i1(<vscale x 8 x i1> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsof_m_nxv8i1
+; CHECK:       vsetvli {{.*}}, a0, e8,m1,ta,mu
+; CHECK:       vmsof.m {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsof.nxv8i1(
+    <vscale x 8 x i1> %0,
+    i64 %1)
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsof.mask.nxv8i1(
+  <vscale x 8 x i1>,
+  <vscale x 8 x i1>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i1> @intrinsic_vmsof_mask_m_nxv8i1_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv8i1
+; CHECK:       vsetvli {{.*}}, a0, e8,m1,ta,mu
+; CHECK:       vmsof.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsof.mask.nxv8i1(
+    <vscale x 8 x i1> %0,
+    <vscale x 8 x i1> %1,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsof.nxv16i1(
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i1> @intrinsic_vmsof_m_nxv16i1(<vscale x 16 x i1> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsof_m_nxv16i1
+; CHECK:       vsetvli {{.*}}, a0, e8,m2,ta,mu
+; CHECK:       vmsof.m {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmsof.nxv16i1(
+    <vscale x 16 x i1> %0,
+    i64 %1)
+  ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsof.mask.nxv16i1(
+  <vscale x 16 x i1>,
+  <vscale x 16 x i1>,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i1> @intrinsic_vmsof_mask_m_nxv16i1_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv16i1
+; CHECK:       vsetvli {{.*}}, a0, e8,m2,ta,mu
+; CHECK:       vmsof.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmsof.mask.nxv16i1(
+    <vscale x 16 x i1> %0,
+    <vscale x 16 x i1> %1,
+    <vscale x 16 x i1> %2,
+    i64 %3)
+  ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmsof.nxv32i1(
+  <vscale x 32 x i1>,
+  i64);
+
+define <vscale x 32 x i1> @intrinsic_vmsof_m_nxv32i1(<vscale x 32 x i1> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsof_m_nxv32i1
+; CHECK:       vsetvli {{.*}}, a0, e8,m4,ta,mu
+; CHECK:       vmsof.m {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmsof.nxv32i1(
+    <vscale x 32 x i1> %0,
+    i64 %1)
+  ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmsof.mask.nxv32i1(
+  <vscale x 32 x i1>,
+  <vscale x 32 x i1>,
+  <vscale x 32 x i1>,
+  i64);
+
+define <vscale x 32 x i1> @intrinsic_vmsof_mask_m_nxv32i1_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv32i1
+; CHECK:       vsetvli {{.*}}, a0, e8,m4,ta,mu
+; CHECK:       vmsof.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmsof.mask.nxv32i1(
+    <vscale x 32 x i1> %0,
+    <vscale x 32 x i1> %1,
+    <vscale x 32 x i1> %2,
+    i64 %3)
+  ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 64 x i1> @llvm.riscv.vmsof.nxv64i1(
+  <vscale x 64 x i1>,
+  i64);
+
+define <vscale x 64 x i1> @intrinsic_vmsof_m_nxv64i1(<vscale x 64 x i1> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsof_m_nxv64i1
+; CHECK:       vsetvli {{.*}}, a0, e8,m8,ta,mu
+; CHECK:       vmsof.m {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 64 x i1> @llvm.riscv.vmsof.nxv64i1(
+    <vscale x 64 x i1> %0,
+    i64 %1)
+  ret <vscale x 64 x i1> %a
+}
+
+declare <vscale x 64 x i1> @llvm.riscv.vmsof.mask.nxv64i1(
+  <vscale x 64 x i1>,
+  <vscale x 64 x i1>,
+  <vscale x 64 x i1>,
+  i64);
+
+define <vscale x 64 x i1> @intrinsic_vmsof_mask_m_nxv64i1_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, <vscale x 64 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv64i1
+; CHECK:       vsetvli {{.*}}, a0, e8,m8,ta,mu
+; CHECK:       vmsof.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 64 x i1> @llvm.riscv.vmsof.mask.nxv64i1(
+    <vscale x 64 x i1> %0,
+    <vscale x 64 x i1> %1,
+    <vscale x 64 x i1> %2,
+    i64 %3)
+  ret <vscale x 64 x i1> %a
+}
