[llvm-branch-commits] [llvm] 19db6a6 - [RISCV] Define vadc/vmadc/vsbc/vmsbc intrinsics.

Hsiangkai Wang via llvm-branch-commits llvm-branch-commits at lists.llvm.org
Tue Dec 15 14:36:03 PST 2020


Author: Hsiangkai Wang
Date: 2020-12-16T06:31:47+08:00
New Revision: 19db6a652b88674b5b0a12eebc4b68244ec88ee4

URL: https://github.com/llvm/llvm-project/commit/19db6a652b88674b5b0a12eebc4b68244ec88ee4
DIFF: https://github.com/llvm/llvm-project/commit/19db6a652b88674b5b0a12eebc4b68244ec88ee4.diff

LOG: [RISCV] Define vadc/vmadc/vsbc/vmsbc intrinsics.

Authored-by: Roger Ferrer Ibanez <rofirrim at gmail.com>
Co-Authored-by: Hsiangkai Wang <kai.wang at sifive.com>

Differential Revision: https://reviews.llvm.org/D93175

Added: 
    llvm/test/CodeGen/RISCV/rvv/vadc-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vadc-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmadc-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmadc-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmsbc-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmsbc-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vsbc-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vsbc-rv64.ll

Modified: 
    llvm/include/llvm/IR/IntrinsicsRISCV.td
    llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td

Removed: 
    


################################################################################
diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
index 4e90f06192b1..6fca577f335a 100644
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -113,6 +113,34 @@ let TargetPrefix = "riscv" in {
                     [IntrNoMem]>, RISCVVIntrinsic {
     let ExtendOperand = 3;
   }
+  // For binary operations with V0 as input.
+  // Input: (vector_in, vector_in/scalar_in, V0, vl)
+  class RISCVBinaryWithV0
+        : Intrinsic<[llvm_anyvector_ty],
+                    [LLVMMatchType<0>, llvm_any_ty, llvm_anyvector_ty,
+                     llvm_anyint_ty],
+                    [IntrNoMem]>, RISCVVIntrinsic {
+    let ExtendOperand = 2;
+  }
+  // For binary operations with mask type output and V0 as input.
+  // Output: (mask type output)
+  // Input: (vector_in, vector_in/scalar_in, V0, vl)
+  class RISCVBinaryMOutWithV0
+        :Intrinsic<[llvm_anyvector_ty],
+                   [llvm_anyvector_ty, llvm_any_ty, LLVMMatchType<0>,
+                    llvm_anyint_ty],
+                   [IntrNoMem]>, RISCVVIntrinsic {
+    let ExtendOperand = 2;
+  }
+  // For binary operations with mask type output.
+  // Output: (mask type output)
+  // Input: (vector_in, vector_in/scalar_in, vl)
+  class RISCVBinaryMOut
+        : Intrinsic<[llvm_anyvector_ty],
+                    [llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty],
+                    [IntrNoMem]>, RISCVVIntrinsic {
+    let ExtendOperand = 2;
+  }
 
   multiclass RISCVBinaryAAX {
     def "int_riscv_" # NAME : RISCVBinaryAAXNoMask;
@@ -122,6 +150,15 @@ let TargetPrefix = "riscv" in {
     def "int_riscv_" # NAME : RISCVBinaryABXNoMask;
     def "int_riscv_" # NAME # "_mask" : RISCVBinaryABXMask;
   }
+  multiclass RISCVBinaryWithV0 {
+    def "int_riscv_" # NAME : RISCVBinaryWithV0;
+  }
+  multiclass RISCVBinaryMaskOutWithV0 {
+    def "int_riscv_" # NAME : RISCVBinaryMOutWithV0;
+  }
+  multiclass RISCVBinaryMaskOut {
+    def "int_riscv_" # NAME : RISCVBinaryMOut;
+  }
 
   defm vadd : RISCVBinaryAAX;
   defm vsub : RISCVBinaryAAX;
@@ -136,4 +173,12 @@ let TargetPrefix = "riscv" in {
   defm vwsubu_w : RISCVBinaryAAX;
   defm vwsub_w : RISCVBinaryAAX;
 
+  defm vadc : RISCVBinaryWithV0;
+  defm vmadc_carry_in : RISCVBinaryMaskOutWithV0;
+  defm vmadc : RISCVBinaryMaskOut;
+
+  defm vsbc : RISCVBinaryWithV0;
+  defm vmsbc_borrow_in : RISCVBinaryMaskOutWithV0;
+  defm vmsbc : RISCVBinaryMaskOut;
+
 } // TargetPrefix = "riscv"
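
For reference, a minimal IR-level usage sketch of the new carry-in add intrinsic
(the nxv2i32 form, matching the declarations exercised by the vadc-rv32.ll test
added below; the wrapper function itself is illustrative only). The i1 vector
operand carries the mask and is bound to v0 during instruction selection:

declare <vscale x 2 x i32> @llvm.riscv.vadc.nxv2i32.nxv2i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  i32)

define <vscale x 2 x i32> @example_vadc(<vscale x 2 x i32> %x, <vscale x 2 x i32> %y, <vscale x 2 x i1> %carry, i32 %vl) nounwind {
entry:
  ; Selected to PseudoVADC_VVM_M1 and emitted as
  ;   vsetvli ..., e32,m1,ta,mu
  ;   vadc.vvm vd, vs2, vs1, v0
  %sum = call <vscale x 2 x i32> @llvm.riscv.vadc.nxv2i32.nxv2i32(
    <vscale x 2 x i32> %x,
    <vscale x 2 x i32> %y,
    <vscale x 2 x i1> %carry,
    i32 %vl)
  ret <vscale x 2 x i32> %sum
}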

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index bae00bb6cda6..63de04c47479 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -277,6 +277,31 @@ class VPseudoBinaryMask<VReg RetClass,
   let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
 }
 
+class VPseudoBinaryCarryIn<VReg RetClass,
+                           VReg Op1Class,
+                           DAGOperand Op2Class,
+                           LMULInfo MInfo,
+                           bit CarryIn,
+                           string Constraint> :
+        Pseudo<(outs RetClass:$rd),
+               !if(!eq(CarryIn, 1),
+                  (ins Op1Class:$rs2, Op2Class:$rs1, VMV0:$carry, GPR:$vl,
+                       ixlenimm:$sew),
+                  (ins Op1Class:$rs2, Op2Class:$rs1, GPR:$vl, ixlenimm:$sew)), []>,
+        RISCVVPseudo {
+  let mayLoad = 0;
+  let mayStore = 0;
+  let hasSideEffects = 0;
+  let usesCustomInserter = 1;
+  let Constraints = Constraint;
+  let Uses = [VL, VTYPE];
+  let VLIndex = !if(!eq(CarryIn, 1), 4, 3);
+  let SEWIndex = !if(!eq(CarryIn, 1), 5, 4);
+  let MergeOpIndex = InvalidIndex.V;
+  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+  let VLMul = MInfo.value;
+}
+
 multiclass VPseudoBinary<VReg RetClass,
                          VReg Op1Class,
                          DAGOperand Op2Class,
@@ -336,6 +361,39 @@ multiclass VPseudoBinaryW_WX {
                              "@earlyclobber $rd">;
 }
 
+// For vadc and vsbc, the instruction encoding is reserved if the destination
+// vector register is v0.
+// For vadc and vsbc, CarryIn == 1 and CarryOut == 0
+multiclass VPseudoBinaryV_VM<bit CarryOut = 0, bit CarryIn = 1,
+                             string Constraint = ""> {
+  foreach m = MxList.m in
+    def "_VV" # !if(!eq(CarryIn, 1), "M", "") # "_" # m.MX :
+      VPseudoBinaryCarryIn<!if(!eq(CarryOut, 1), VR,
+                           !if(!and(!eq(CarryIn, 1), !eq(CarryOut, 0)),
+                               GetVRegNoV0<m.vrclass>.R, m.vrclass)),
+                           m.vrclass, m.vrclass, m, CarryIn, Constraint>;
+}
+
+multiclass VPseudoBinaryV_XM<bit CarryOut = 0, bit CarryIn = 1,
+                             string Constraint = ""> {
+  foreach m = MxList.m in
+    def "_VX" # !if(!eq(CarryIn, 1), "M", "") # "_" # m.MX :
+      VPseudoBinaryCarryIn<!if(!eq(CarryOut, 1), VR,
+                           !if(!and(!eq(CarryIn, 1), !eq(CarryOut, 0)),
+                               GetVRegNoV0<m.vrclass>.R, m.vrclass)),
+                           m.vrclass, GPR, m, CarryIn, Constraint>;
+}
+
+multiclass VPseudoBinaryV_IM<bit CarryOut = 0, bit CarryIn = 1,
+                             string Constraint = ""> {
+  foreach m = MxList.m in
+    def "_VI" # !if(!eq(CarryIn, 1), "M", "") # "_" # m.MX :
+      VPseudoBinaryCarryIn<!if(!eq(CarryOut, 1), VR,
+                           !if(!and(!eq(CarryIn, 1), !eq(CarryOut, 0)),
+                               GetVRegNoV0<m.vrclass>.R, m.vrclass)),
+                           m.vrclass, simm5, m, CarryIn, Constraint>;
+}
+
 multiclass VPseudoBinaryV_VV_VX_VI<Operand ImmType = simm5> {
   defm "" : VPseudoBinaryV_VV;
   defm "" : VPseudoBinaryV_VX;
@@ -362,6 +420,39 @@ multiclass VPseudoBinaryW_WV_WX {
   defm "" : VPseudoBinaryW_WX;
 }
 
+multiclass VPseudoBinaryV_VM_XM_IM {
+  defm "" : VPseudoBinaryV_VM;
+  defm "" : VPseudoBinaryV_XM;
+  defm "" : VPseudoBinaryV_IM;
+}
+
+multiclass VPseudoBinaryV_VM_XM {
+  defm "" : VPseudoBinaryV_VM;
+  defm "" : VPseudoBinaryV_XM;
+}
+
+multiclass VPseudoBinaryM_VM_XM_IM<string Constraint> {
+  defm "" : VPseudoBinaryV_VM</*CarryOut=*/1, /*CarryIn=*/1, Constraint>;
+  defm "" : VPseudoBinaryV_XM</*CarryOut=*/1, /*CarryIn=*/1, Constraint>;
+  defm "" : VPseudoBinaryV_IM</*CarryOut=*/1, /*CarryIn=*/1, Constraint>;
+}
+
+multiclass VPseudoBinaryM_VM_XM<string Constraint> {
+  defm "" : VPseudoBinaryV_VM</*CarryOut=*/1, /*CarryIn=*/1, Constraint>;
+  defm "" : VPseudoBinaryV_XM</*CarryOut=*/1, /*CarryIn=*/1, Constraint>;
+}
+
+multiclass VPseudoBinaryM_V_X_I<string Constraint> {
+  defm "" : VPseudoBinaryV_VM</*CarryOut=*/1, /*CarryIn=*/0, Constraint>;
+  defm "" : VPseudoBinaryV_XM</*CarryOut=*/1, /*CarryIn=*/0, Constraint>;
+  defm "" : VPseudoBinaryV_IM</*CarryOut=*/1, /*CarryIn=*/0, Constraint>;
+}
+
+multiclass VPseudoBinaryM_V_X<string Constraint> {
+  defm "" : VPseudoBinaryV_VM</*CarryOut=*/1, /*CarryIn=*/0, Constraint>;
+  defm "" : VPseudoBinaryV_XM</*CarryOut=*/1, /*CarryIn=*/0, Constraint>;
+}
+
 //===----------------------------------------------------------------------===//
 // Helpers to define the different patterns.
 //===----------------------------------------------------------------------===//
@@ -453,6 +544,50 @@ multiclass VPatBinary<string intrinsic,
                        op2_kind>;
 }
 
+multiclass VPatBinaryCarryIn<string intrinsic,
+                             string inst,
+                             string kind,
+                             ValueType result_type,
+                             ValueType op1_type,
+                             ValueType op2_type,
+                             ValueType mask_type,
+                             int sew,
+                             LMULInfo vlmul,
+                             VReg op1_reg_class,
+                             DAGOperand op2_kind>
+{
+  def : Pat<(result_type (!cast<Intrinsic>(intrinsic)
+                         (op1_type op1_reg_class:$rs1),
+                         (op2_type op2_kind:$rs2),
+                         (mask_type V0),
+                         (XLenVT GPR:$vl))),
+                         (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
+                         (op1_type op1_reg_class:$rs1),
+                         ToFPR32<op2_type, op2_kind, "rs2">.ret,
+                         (mask_type V0), (NoX0 GPR:$vl), sew)>;
+}
+
+multiclass VPatBinaryMaskOut<string intrinsic,
+                             string inst,
+                             string kind,
+                             ValueType result_type,
+                             ValueType op1_type,
+                             ValueType op2_type,
+                             int sew,
+                             LMULInfo vlmul,
+                             VReg op1_reg_class,
+                             DAGOperand op2_kind>
+{
+  def : Pat<(result_type (!cast<Intrinsic>(intrinsic)
+                         (op1_type op1_reg_class:$rs1),
+                         (op2_type op2_kind:$rs2),
+                         (XLenVT GPR:$vl))),
+                         (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
+                         (op1_type op1_reg_class:$rs1),
+                         ToFPR32<op2_type, op2_kind, "rs2">.ret,
+                         (NoX0 GPR:$vl), sew)>;
+}
+
 multiclass VPatBinaryV_VV<string intrinsic, string instruction,
                           list<VTypeInfo> vtilist> {
   foreach vti = vtilist in
@@ -524,6 +659,60 @@ multiclass VPatBinaryW_WX<string intrinsic, string instruction> {
   }
 }
 
+multiclass VPatBinaryV_VM<string intrinsic, string instruction,
+                          bit CarryOut = 0> {
+  foreach vti = AllIntegerVectors in
+    defm : VPatBinaryCarryIn<intrinsic, instruction, "VVM",
+                             !if(!eq(CarryOut, 1), vti.Mask, vti.Vector),
+                             vti.Vector, vti.Vector, vti.Mask,
+                             vti.SEW, vti.LMul,
+                             vti.RegClass, vti.RegClass>;
+}
+
+multiclass VPatBinaryV_XM<string intrinsic, string instruction,
+                          bit CarryOut = 0> {
+  foreach vti = AllIntegerVectors in
+    defm : VPatBinaryCarryIn<intrinsic, instruction, "VXM",
+                             !if(!eq(CarryOut, 1), vti.Mask, vti.Vector),
+                             vti.Vector, XLenVT, vti.Mask,
+                             vti.SEW, vti.LMul,
+                             vti.RegClass, GPR>;
+}
+
+multiclass VPatBinaryV_IM<string intrinsic, string instruction,
+                          bit CarryOut = 0> {
+  foreach vti = AllIntegerVectors in
+    defm : VPatBinaryCarryIn<intrinsic, instruction, "VIM",
+                             !if(!eq(CarryOut, 1), vti.Mask, vti.Vector),
+                             vti.Vector, XLenVT, vti.Mask,
+                             vti.SEW, vti.LMul,
+                             vti.RegClass, simm5>;
+}
+
+multiclass VPatBinaryV_V<string intrinsic, string instruction> {
+  foreach vti = AllIntegerVectors in
+    defm : VPatBinaryMaskOut<intrinsic, instruction, "VV",
+                             vti.Mask, vti.Vector, vti.Vector,
+                             vti.SEW, vti.LMul,
+                             vti.RegClass, vti.RegClass>;
+}
+
+multiclass VPatBinaryV_X<string intrinsic, string instruction> {
+  foreach vti = AllIntegerVectors in
+    defm : VPatBinaryMaskOut<intrinsic, instruction, "VX",
+                             vti.Mask, vti.Vector, XLenVT,
+                             vti.SEW, vti.LMul,
+                             vti.RegClass, GPR>;
+}
+
+multiclass VPatBinaryV_I<string intrinsic, string instruction> {
+  foreach vti = AllIntegerVectors in
+    defm : VPatBinaryMaskOut<intrinsic, instruction, "VI",
+                             vti.Mask, vti.Vector, XLenVT,
+                             vti.SEW, vti.LMul,
+                             vti.RegClass, simm5>;
+}
+
 multiclass VPatBinaryV_VV_VX_VI<string intrinsic, string instruction,
                                 list<VTypeInfo> vtilist>
 {
@@ -558,6 +747,45 @@ multiclass VPatBinaryW_WV_WX<string intrinsic, string instruction>
   defm "" : VPatBinaryW_WX<intrinsic, instruction>;
 }
 
+multiclass VPatBinaryV_VM_XM_IM<string intrinsic, string instruction>
+{
+  defm "" : VPatBinaryV_VM<intrinsic, instruction>;
+  defm "" : VPatBinaryV_XM<intrinsic, instruction>;
+  defm "" : VPatBinaryV_IM<intrinsic, instruction>;
+}
+
+multiclass VPatBinaryM_VM_XM_IM<string intrinsic, string instruction>
+{
+  defm "" : VPatBinaryV_VM<intrinsic, instruction, /*CarryOut=*/1>;
+  defm "" : VPatBinaryV_XM<intrinsic, instruction, /*CarryOut=*/1>;
+  defm "" : VPatBinaryV_IM<intrinsic, instruction, /*CarryOut=*/1>;
+}
+
+multiclass VPatBinaryM_V_X_I<string intrinsic, string instruction>
+{
+  defm "" : VPatBinaryV_V<intrinsic, instruction>;
+  defm "" : VPatBinaryV_X<intrinsic, instruction>;
+  defm "" : VPatBinaryV_I<intrinsic, instruction>;
+}
+
+multiclass VPatBinaryV_VM_XM<string intrinsic, string instruction>
+{
+  defm "" : VPatBinaryV_VM<intrinsic, instruction>;
+  defm "" : VPatBinaryV_XM<intrinsic, instruction>;
+}
+
+multiclass VPatBinaryM_VM_XM<string intrinsic, string instruction>
+{
+  defm "" : VPatBinaryV_VM<intrinsic, instruction, /*CarryOut=*/1>;
+  defm "" : VPatBinaryV_XM<intrinsic, instruction, /*CarryOut=*/1>;
+}
+
+multiclass VPatBinaryM_V_X<string intrinsic, string instruction>
+{
+  defm "" : VPatBinaryV_V<intrinsic, instruction>;
+  defm "" : VPatBinaryV_X<intrinsic, instruction>;
+}
+
 //===----------------------------------------------------------------------===//
 // Pseudo instructions and patterns.
 //===----------------------------------------------------------------------===//
@@ -694,6 +922,16 @@ defm PseudoVWSUBU    : VPseudoBinaryW_WV_WX;
 defm PseudoVWADD     : VPseudoBinaryW_WV_WX;
 defm PseudoVWSUB     : VPseudoBinaryW_WV_WX;
 
+//===----------------------------------------------------------------------===//
+// 12.4. Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions
+//===----------------------------------------------------------------------===//
+defm PseudoVADC        : VPseudoBinaryV_VM_XM_IM;
+defm PseudoVMADC       : VPseudoBinaryM_VM_XM_IM<"@earlyclobber $rd">;
+defm PseudoVMADC       : VPseudoBinaryM_V_X_I<"@earlyclobber $rd">;
+
+defm PseudoVSBC        : VPseudoBinaryV_VM_XM;
+defm PseudoVMSBC       : VPseudoBinaryM_VM_XM<"@earlyclobber $rd">;
+defm PseudoVMSBC       : VPseudoBinaryM_V_X<"@earlyclobber $rd">;
 
 //===----------------------------------------------------------------------===//
 // Patterns.
@@ -725,4 +963,15 @@ defm "" : VPatBinaryW_WV_WX<"int_riscv_vwsubu_w", "PseudoVWSUBU">;
 defm "" : VPatBinaryW_WV_WX<"int_riscv_vwadd_w", "PseudoVWADD">;
 defm "" : VPatBinaryW_WV_WX<"int_riscv_vwsub_w", "PseudoVWSUB">;
 
+//===----------------------------------------------------------------------===//
+// 12.4. Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions
+//===----------------------------------------------------------------------===//
+defm "" : VPatBinaryV_VM_XM_IM<"int_riscv_vadc", "PseudoVADC">;
+defm "" : VPatBinaryM_VM_XM_IM<"int_riscv_vmadc_carry_in", "PseudoVMADC">;
+defm "" : VPatBinaryM_V_X_I<"int_riscv_vmadc", "PseudoVMADC">;
+
+defm "" : VPatBinaryV_VM_XM<"int_riscv_vsbc", "PseudoVSBC">;
+defm "" : VPatBinaryM_VM_XM<"int_riscv_vmsbc_borrow_in", "PseudoVMSBC">;
+defm "" : VPatBinaryM_V_X<"int_riscv_vmsbc", "PseudoVMSBC">;
+
 } // Predicates = [HasStdExtV]
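
The mask-producing forms return the carry-out/borrow-out as an i1 vector rather
than consuming v0. Below is a minimal sketch of the plain vmadc form (no
carry-in); the mangled intrinsic name is assumed to follow the same convention
as the vadc tests, and the vmadc-rv32.ll/vmadc-rv64.ll tests added by this
commit hold the authoritative declarations:

declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i32.nxv2i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  i32)

define <vscale x 2 x i1> @example_vmadc(<vscale x 2 x i32> %x, <vscale x 2 x i32> %y, i32 %vl) nounwind {
entry:
  ; Selected to PseudoVMADC_VV_M1 and emitted as vmadc.vv; the destination is
  ; marked @earlyclobber, so it is never allocated on top of a source operand.
  %carry = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i32.nxv2i32(
    <vscale x 2 x i32> %x,
    <vscale x 2 x i32> %y,
    i32 %vl)
  ret <vscale x 2 x i1> %carry
}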

diff --git a/llvm/test/CodeGen/RISCV/rvv/vadc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vadc-rv32.ll
new file mode 100644
index 000000000000..0557e58946ce
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vadc-rv32.ll
@@ -0,0 +1,973 @@
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i8> @intrinsic_vadc_vvm_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vvm_nxv1i8_nxv1i8_nxv1i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vadc.nxv2i8.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i8> @intrinsic_vadc_vvm_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vvm_nxv2i8_nxv2i8_nxv2i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 2 x i8> @llvm.riscv.vadc.nxv2i8.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vadc.nxv4i8.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i8> @intrinsic_vadc_vvm_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vvm_nxv4i8_nxv4i8_nxv4i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 4 x i8> @llvm.riscv.vadc.nxv4i8.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vadc.nxv8i8.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i8> @intrinsic_vadc_vvm_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vvm_nxv8i8_nxv8i8_nxv8i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 8 x i8> @llvm.riscv.vadc.nxv8i8.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vadc.nxv16i8.nxv16i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i8> @intrinsic_vadc_vvm_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vvm_nxv16i8_nxv16i8_nxv16i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 16 x i8> @llvm.riscv.vadc.nxv16i8.nxv16i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    <vscale x 16 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vadc.nxv32i8.nxv32i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  <vscale x 32 x i1>,
+  i32);
+
+define <vscale x 32 x i8> @intrinsic_vadc_vvm_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vvm_nxv32i8_nxv32i8_nxv32i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 32 x i8> @llvm.riscv.vadc.nxv32i8.nxv32i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8> %1,
+    <vscale x 32 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vadc.nxv64i8.nxv64i8(
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  <vscale x 64 x i1>,
+  i32);
+
+define <vscale x 64 x i8> @intrinsic_vadc_vvm_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vvm_nxv64i8_nxv64i8_nxv64i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 64 x i8> @llvm.riscv.vadc.nxv64i8.nxv64i8(
+    <vscale x 64 x i8> %0,
+    <vscale x 64 x i8> %1,
+    <vscale x 64 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vadc.nxv1i16.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i16> @intrinsic_vadc_vvm_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vvm_nxv1i16_nxv1i16_nxv1i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 1 x i16> @llvm.riscv.vadc.nxv1i16.nxv1i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vadc.nxv2i16.nxv2i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i16> @intrinsic_vadc_vvm_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vvm_nxv2i16_nxv2i16_nxv2i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 2 x i16> @llvm.riscv.vadc.nxv2i16.nxv2i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %1,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vadc.nxv4i16.nxv4i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i16> @intrinsic_vadc_vvm_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vvm_nxv4i16_nxv4i16_nxv4i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 4 x i16> @llvm.riscv.vadc.nxv4i16.nxv4i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %1,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vadc.nxv8i16.nxv8i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i16> @intrinsic_vadc_vvm_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vvm_nxv8i16_nxv8i16_nxv8i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 8 x i16> @llvm.riscv.vadc.nxv8i16.nxv8i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %1,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vadc.nxv16i16.nxv16i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i16> @intrinsic_vadc_vvm_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vvm_nxv16i16_nxv16i16_nxv16i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 16 x i16> @llvm.riscv.vadc.nxv16i16.nxv16i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %1,
+    <vscale x 16 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vadc.nxv32i16.nxv32i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  <vscale x 32 x i1>,
+  i32);
+
+define <vscale x 32 x i16> @intrinsic_vadc_vvm_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vvm_nxv32i16_nxv32i16_nxv32i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 32 x i16> @llvm.riscv.vadc.nxv32i16.nxv32i16(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16> %1,
+    <vscale x 32 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i32> @intrinsic_vadc_vvm_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vvm_nxv1i32_nxv1i32_nxv1i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.nxv1i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vadc.nxv2i32.nxv2i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i32> @intrinsic_vadc_vvm_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vvm_nxv2i32_nxv2i32_nxv2i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 2 x i32> @llvm.riscv.vadc.nxv2i32.nxv2i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vadc.nxv4i32.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i32> @intrinsic_vadc_vvm_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vvm_nxv4i32_nxv4i32_nxv4i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 4 x i32> @llvm.riscv.vadc.nxv4i32.nxv4i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vadc.nxv8i32.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i32> @intrinsic_vadc_vvm_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vvm_nxv8i32_nxv8i32_nxv8i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 8 x i32> @llvm.riscv.vadc.nxv8i32.nxv8i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vadc.nxv16i32.nxv16i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i32> @intrinsic_vadc_vvm_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vvm_nxv16i32_nxv16i32_nxv16i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 16 x i32> @llvm.riscv.vadc.nxv16i32.nxv16i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    <vscale x 16 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.i8(
+  <vscale x 1 x i8>,
+  i8,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i8> @intrinsic_vadc_vxm_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vxm_nxv1i8_nxv1i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.i8(
+    <vscale x 1 x i8> %0,
+    i8 %1,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vadc.nxv2i8.i8(
+  <vscale x 2 x i8>,
+  i8,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i8> @intrinsic_vadc_vxm_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vxm_nxv2i8_nxv2i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 2 x i8> @llvm.riscv.vadc.nxv2i8.i8(
+    <vscale x 2 x i8> %0,
+    i8 %1,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vadc.nxv4i8.i8(
+  <vscale x 4 x i8>,
+  i8,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i8> @intrinsic_vadc_vxm_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vxm_nxv4i8_nxv4i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 4 x i8> @llvm.riscv.vadc.nxv4i8.i8(
+    <vscale x 4 x i8> %0,
+    i8 %1,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vadc.nxv8i8.i8(
+  <vscale x 8 x i8>,
+  i8,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i8> @intrinsic_vadc_vxm_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vxm_nxv8i8_nxv8i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 8 x i8> @llvm.riscv.vadc.nxv8i8.i8(
+    <vscale x 8 x i8> %0,
+    i8 %1,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vadc.nxv16i8.i8(
+  <vscale x 16 x i8>,
+  i8,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i8> @intrinsic_vadc_vxm_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vxm_nxv16i8_nxv16i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 16 x i8> @llvm.riscv.vadc.nxv16i8.i8(
+    <vscale x 16 x i8> %0,
+    i8 %1,
+    <vscale x 16 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vadc.nxv32i8.i8(
+  <vscale x 32 x i8>,
+  i8,
+  <vscale x 32 x i1>,
+  i32);
+
+define <vscale x 32 x i8> @intrinsic_vadc_vxm_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vxm_nxv32i8_nxv32i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 32 x i8> @llvm.riscv.vadc.nxv32i8.i8(
+    <vscale x 32 x i8> %0,
+    i8 %1,
+    <vscale x 32 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vadc.nxv64i8.i8(
+  <vscale x 64 x i8>,
+  i8,
+  <vscale x 64 x i1>,
+  i32);
+
+define <vscale x 64 x i8> @intrinsic_vadc_vxm_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, <vscale x 64 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vxm_nxv64i8_nxv64i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 64 x i8> @llvm.riscv.vadc.nxv64i8.i8(
+    <vscale x 64 x i8> %0,
+    i8 %1,
+    <vscale x 64 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vadc.nxv1i16.i16(
+  <vscale x 1 x i16>,
+  i16,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i16> @intrinsic_vadc_vxm_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vxm_nxv1i16_nxv1i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 1 x i16> @llvm.riscv.vadc.nxv1i16.i16(
+    <vscale x 1 x i16> %0,
+    i16 %1,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vadc.nxv2i16.i16(
+  <vscale x 2 x i16>,
+  i16,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i16> @intrinsic_vadc_vxm_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vxm_nxv2i16_nxv2i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 2 x i16> @llvm.riscv.vadc.nxv2i16.i16(
+    <vscale x 2 x i16> %0,
+    i16 %1,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vadc.nxv4i16.i16(
+  <vscale x 4 x i16>,
+  i16,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i16> @intrinsic_vadc_vxm_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vxm_nxv4i16_nxv4i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 4 x i16> @llvm.riscv.vadc.nxv4i16.i16(
+    <vscale x 4 x i16> %0,
+    i16 %1,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vadc.nxv8i16.i16(
+  <vscale x 8 x i16>,
+  i16,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i16> @intrinsic_vadc_vxm_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vxm_nxv8i16_nxv8i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 8 x i16> @llvm.riscv.vadc.nxv8i16.i16(
+    <vscale x 8 x i16> %0,
+    i16 %1,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vadc.nxv16i16.i16(
+  <vscale x 16 x i16>,
+  i16,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i16> @intrinsic_vadc_vxm_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vxm_nxv16i16_nxv16i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 16 x i16> @llvm.riscv.vadc.nxv16i16.i16(
+    <vscale x 16 x i16> %0,
+    i16 %1,
+    <vscale x 16 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vadc.nxv32i16.i16(
+  <vscale x 32 x i16>,
+  i16,
+  <vscale x 32 x i1>,
+  i32);
+
+define <vscale x 32 x i16> @intrinsic_vadc_vxm_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vxm_nxv32i16_nxv32i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 32 x i16> @llvm.riscv.vadc.nxv32i16.i16(
+    <vscale x 32 x i16> %0,
+    i16 %1,
+    <vscale x 32 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.i32(
+  <vscale x 1 x i32>,
+  i32,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i32> @intrinsic_vadc_vxm_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vxm_nxv1i32_nxv1i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.i32(
+    <vscale x 1 x i32> %0,
+    i32 %1,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vadc.nxv2i32.i32(
+  <vscale x 2 x i32>,
+  i32,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i32> @intrinsic_vadc_vxm_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vxm_nxv2i32_nxv2i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 2 x i32> @llvm.riscv.vadc.nxv2i32.i32(
+    <vscale x 2 x i32> %0,
+    i32 %1,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vadc.nxv4i32.i32(
+  <vscale x 4 x i32>,
+  i32,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i32> @intrinsic_vadc_vxm_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vxm_nxv4i32_nxv4i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 4 x i32> @llvm.riscv.vadc.nxv4i32.i32(
+    <vscale x 4 x i32> %0,
+    i32 %1,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vadc.nxv8i32.i32(
+  <vscale x 8 x i32>,
+  i32,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i32> @intrinsic_vadc_vxm_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vxm_nxv8i32_nxv8i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 8 x i32> @llvm.riscv.vadc.nxv8i32.i32(
+    <vscale x 8 x i32> %0,
+    i32 %1,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vadc.nxv16i32.i32(
+  <vscale x 16 x i32>,
+  i32,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i32> @intrinsic_vadc_vxm_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vxm_nxv16i32_nxv16i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 16 x i32> @llvm.riscv.vadc.nxv16i32.i32(
+    <vscale x 16 x i32> %0,
+    i32 %1,
+    <vscale x 16 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 16 x i32> %a
+}
+
+define <vscale x 1 x i8> @intrinsic_vadc_vim_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vim_nxv1i8_nxv1i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.i8(
+    <vscale x 1 x i8> %0,
+    i8 9,
+    <vscale x 1 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_vadc_vim_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vim_nxv2i8_nxv2i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 2 x i8> @llvm.riscv.vadc.nxv2i8.i8(
+    <vscale x 2 x i8> %0,
+    i8 9,
+    <vscale x 2 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_vadc_vim_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vim_nxv4i8_nxv4i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 4 x i8> @llvm.riscv.vadc.nxv4i8.i8(
+    <vscale x 4 x i8> %0,
+    i8 9,
+    <vscale x 4 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vadc_vim_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vim_nxv8i8_nxv8i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 8 x i8> @llvm.riscv.vadc.nxv8i8.i8(
+    <vscale x 8 x i8> %0,
+    i8 9,
+    <vscale x 8 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_vadc_vim_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vim_nxv16i8_nxv16i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 16 x i8> @llvm.riscv.vadc.nxv16i8.i8(
+    <vscale x 16 x i8> %0,
+    i8 9,
+    <vscale x 16 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 32 x i8> @intrinsic_vadc_vim_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vim_nxv32i8_nxv32i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 32 x i8> @llvm.riscv.vadc.nxv32i8.i8(
+    <vscale x 32 x i8> %0,
+    i8 9,
+    <vscale x 32 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 32 x i8> %a
+}
+
+define <vscale x 64 x i8> @intrinsic_vadc_vim_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vim_nxv64i8_nxv64i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 64 x i8> @llvm.riscv.vadc.nxv64i8.i8(
+    <vscale x 64 x i8> %0,
+    i8 9,
+    <vscale x 64 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 64 x i8> %a
+}
+
+define <vscale x 1 x i16> @intrinsic_vadc_vim_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vim_nxv1i16_nxv1i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 1 x i16> @llvm.riscv.vadc.nxv1i16.i16(
+    <vscale x 1 x i16> %0,
+    i16 9,
+    <vscale x 1 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 2 x i16> @intrinsic_vadc_vim_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vim_nxv2i16_nxv2i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 2 x i16> @llvm.riscv.vadc.nxv2i16.i16(
+    <vscale x 2 x i16> %0,
+    i16 9,
+    <vscale x 2 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 4 x i16> @intrinsic_vadc_vim_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vim_nxv4i16_nxv4i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 4 x i16> @llvm.riscv.vadc.nxv4i16.i16(
+    <vscale x 4 x i16> %0,
+    i16 9,
+    <vscale x 4 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 8 x i16> @intrinsic_vadc_vim_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vim_nxv8i16_nxv8i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 8 x i16> @llvm.riscv.vadc.nxv8i16.i16(
+    <vscale x 8 x i16> %0,
+    i16 9,
+    <vscale x 8 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 16 x i16> @intrinsic_vadc_vim_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vim_nxv16i16_nxv16i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 16 x i16> @llvm.riscv.vadc.nxv16i16.i16(
+    <vscale x 16 x i16> %0,
+    i16 9,
+    <vscale x 16 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 32 x i16> @intrinsic_vadc_vim_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vim_nxv32i16_nxv32i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 32 x i16> @llvm.riscv.vadc.nxv32i16.i16(
+    <vscale x 32 x i16> %0,
+    i16 9,
+    <vscale x 32 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 32 x i16> %a
+}
+
+define <vscale x 1 x i32> @intrinsic_vadc_vim_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vim_nxv1i32_nxv1i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.i32(
+    <vscale x 1 x i32> %0,
+    i32 9,
+    <vscale x 1 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 2 x i32> @intrinsic_vadc_vim_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vim_nxv2i32_nxv2i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 2 x i32> @llvm.riscv.vadc.nxv2i32.i32(
+    <vscale x 2 x i32> %0,
+    i32 9,
+    <vscale x 2 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 4 x i32> @intrinsic_vadc_vim_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vim_nxv4i32_nxv4i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 4 x i32> @llvm.riscv.vadc.nxv4i32.i32(
+    <vscale x 4 x i32> %0,
+    i32 9,
+    <vscale x 4 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 8 x i32> @intrinsic_vadc_vim_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vim_nxv8i32_nxv8i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 8 x i32> @llvm.riscv.vadc.nxv8i32.i32(
+    <vscale x 8 x i32> %0,
+    i32 9,
+    <vscale x 8 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 16 x i32> @intrinsic_vadc_vim_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vim_nxv16i32_nxv16i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 16 x i32> @llvm.riscv.vadc.nxv16i32.i32(
+    <vscale x 16 x i32> %0,
+    i32 9,
+    <vscale x 16 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 16 x i32> %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vadc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vadc-rv64.ll
new file mode 100644
index 000000000000..4c3e9a3e4473
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vadc-rv64.ll
@@ -0,0 +1,1189 @@
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i8> @intrinsic_vadc_vvm_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vvm_nxv1i8_nxv1i8_nxv1i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vadc.nxv2i8.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i8> @intrinsic_vadc_vvm_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vvm_nxv2i8_nxv2i8_nxv2i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 2 x i8> @llvm.riscv.vadc.nxv2i8.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vadc.nxv4i8.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i8> @intrinsic_vadc_vvm_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vvm_nxv4i8_nxv4i8_nxv4i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 4 x i8> @llvm.riscv.vadc.nxv4i8.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vadc.nxv8i8.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i8> @intrinsic_vadc_vvm_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vvm_nxv8i8_nxv8i8_nxv8i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 8 x i8> @llvm.riscv.vadc.nxv8i8.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vadc.nxv16i8.nxv16i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i8> @intrinsic_vadc_vvm_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vvm_nxv16i8_nxv16i8_nxv16i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 16 x i8> @llvm.riscv.vadc.nxv16i8.nxv16i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    <vscale x 16 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vadc.nxv32i8.nxv32i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  <vscale x 32 x i1>,
+  i64);
+
+define <vscale x 32 x i8> @intrinsic_vadc_vvm_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vvm_nxv32i8_nxv32i8_nxv32i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 32 x i8> @llvm.riscv.vadc.nxv32i8.nxv32i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8> %1,
+    <vscale x 32 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vadc.nxv64i8.nxv64i8(
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  <vscale x 64 x i1>,
+  i64);
+
+define <vscale x 64 x i8> @intrinsic_vadc_vvm_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vvm_nxv64i8_nxv64i8_nxv64i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 64 x i8> @llvm.riscv.vadc.nxv64i8.nxv64i8(
+    <vscale x 64 x i8> %0,
+    <vscale x 64 x i8> %1,
+    <vscale x 64 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vadc.nxv1i16.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i16> @intrinsic_vadc_vvm_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vvm_nxv1i16_nxv1i16_nxv1i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 1 x i16> @llvm.riscv.vadc.nxv1i16.nxv1i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vadc.nxv2i16.nxv2i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i16> @intrinsic_vadc_vvm_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vvm_nxv2i16_nxv2i16_nxv2i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 2 x i16> @llvm.riscv.vadc.nxv2i16.nxv2i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %1,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vadc.nxv4i16.nxv4i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i16> @intrinsic_vadc_vvm_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vvm_nxv4i16_nxv4i16_nxv4i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 4 x i16> @llvm.riscv.vadc.nxv4i16.nxv4i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %1,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vadc.nxv8i16.nxv8i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i16> @intrinsic_vadc_vvm_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vvm_nxv8i16_nxv8i16_nxv8i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 8 x i16> @llvm.riscv.vadc.nxv8i16.nxv8i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %1,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vadc.nxv16i16.nxv16i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i16> @intrinsic_vadc_vvm_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vvm_nxv16i16_nxv16i16_nxv16i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 16 x i16> @llvm.riscv.vadc.nxv16i16.nxv16i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %1,
+    <vscale x 16 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vadc.nxv32i16.nxv32i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  <vscale x 32 x i1>,
+  i64);
+
+define <vscale x 32 x i16> @intrinsic_vadc_vvm_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vvm_nxv32i16_nxv32i16_nxv32i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 32 x i16> @llvm.riscv.vadc.nxv32i16.nxv32i16(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16> %1,
+    <vscale x 32 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i32> @intrinsic_vadc_vvm_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vvm_nxv1i32_nxv1i32_nxv1i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.nxv1i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vadc.nxv2i32.nxv2i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i32> @intrinsic_vadc_vvm_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vvm_nxv2i32_nxv2i32_nxv2i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 2 x i32> @llvm.riscv.vadc.nxv2i32.nxv2i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vadc.nxv4i32.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i32> @intrinsic_vadc_vvm_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vvm_nxv4i32_nxv4i32_nxv4i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 4 x i32> @llvm.riscv.vadc.nxv4i32.nxv4i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vadc.nxv8i32.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i32> @intrinsic_vadc_vvm_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vvm_nxv8i32_nxv8i32_nxv8i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 8 x i32> @llvm.riscv.vadc.nxv8i32.nxv8i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vadc.nxv16i32.nxv16i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i32> @intrinsic_vadc_vvm_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vvm_nxv16i32_nxv16i32_nxv16i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 16 x i32> @llvm.riscv.vadc.nxv16i32.nxv16i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    <vscale x 16 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vadc.nxv1i64.nxv1i64(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i64> @intrinsic_vadc_vvm_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vvm_nxv1i64_nxv1i64_nxv1i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 1 x i64> @llvm.riscv.vadc.nxv1i64.nxv1i64(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64> %1,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vadc.nxv2i64.nxv2i64(
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i64> @intrinsic_vadc_vvm_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vvm_nxv2i64_nxv2i64_nxv2i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 2 x i64> @llvm.riscv.vadc.nxv2i64.nxv2i64(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64> %1,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vadc.nxv4i64.nxv4i64(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i64> @intrinsic_vadc_vvm_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vvm_nxv4i64_nxv4i64_nxv4i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 4 x i64> @llvm.riscv.vadc.nxv4i64.nxv4i64(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64> %1,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vadc.nxv8i64.nxv8i64(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i64> @intrinsic_vadc_vvm_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vvm_nxv8i64_nxv8i64_nxv8i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 8 x i64> @llvm.riscv.vadc.nxv8i64.nxv8i64(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i64> %1,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.i8(
+  <vscale x 1 x i8>,
+  i8,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i8> @intrinsic_vadc_vxm_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vxm_nxv1i8_nxv1i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.i8(
+    <vscale x 1 x i8> %0,
+    i8 %1,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vadc.nxv2i8.i8(
+  <vscale x 2 x i8>,
+  i8,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i8> @intrinsic_vadc_vxm_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vxm_nxv2i8_nxv2i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 2 x i8> @llvm.riscv.vadc.nxv2i8.i8(
+    <vscale x 2 x i8> %0,
+    i8 %1,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vadc.nxv4i8.i8(
+  <vscale x 4 x i8>,
+  i8,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i8> @intrinsic_vadc_vxm_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vxm_nxv4i8_nxv4i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 4 x i8> @llvm.riscv.vadc.nxv4i8.i8(
+    <vscale x 4 x i8> %0,
+    i8 %1,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vadc.nxv8i8.i8(
+  <vscale x 8 x i8>,
+  i8,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i8> @intrinsic_vadc_vxm_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vxm_nxv8i8_nxv8i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 8 x i8> @llvm.riscv.vadc.nxv8i8.i8(
+    <vscale x 8 x i8> %0,
+    i8 %1,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vadc.nxv16i8.i8(
+  <vscale x 16 x i8>,
+  i8,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i8> @intrinsic_vadc_vxm_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vxm_nxv16i8_nxv16i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 16 x i8> @llvm.riscv.vadc.nxv16i8.i8(
+    <vscale x 16 x i8> %0,
+    i8 %1,
+    <vscale x 16 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vadc.nxv32i8.i8(
+  <vscale x 32 x i8>,
+  i8,
+  <vscale x 32 x i1>,
+  i64);
+
+define <vscale x 32 x i8> @intrinsic_vadc_vxm_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vxm_nxv32i8_nxv32i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 32 x i8> @llvm.riscv.vadc.nxv32i8.i8(
+    <vscale x 32 x i8> %0,
+    i8 %1,
+    <vscale x 32 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vadc.nxv64i8.i8(
+  <vscale x 64 x i8>,
+  i8,
+  <vscale x 64 x i1>,
+  i64);
+
+define <vscale x 64 x i8> @intrinsic_vadc_vxm_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, <vscale x 64 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vxm_nxv64i8_nxv64i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 64 x i8> @llvm.riscv.vadc.nxv64i8.i8(
+    <vscale x 64 x i8> %0,
+    i8 %1,
+    <vscale x 64 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vadc.nxv1i16.i16(
+  <vscale x 1 x i16>,
+  i16,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i16> @intrinsic_vadc_vxm_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vxm_nxv1i16_nxv1i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 1 x i16> @llvm.riscv.vadc.nxv1i16.i16(
+    <vscale x 1 x i16> %0,
+    i16 %1,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vadc.nxv2i16.i16(
+  <vscale x 2 x i16>,
+  i16,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i16> @intrinsic_vadc_vxm_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vxm_nxv2i16_nxv2i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 2 x i16> @llvm.riscv.vadc.nxv2i16.i16(
+    <vscale x 2 x i16> %0,
+    i16 %1,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vadc.nxv4i16.i16(
+  <vscale x 4 x i16>,
+  i16,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i16> @intrinsic_vadc_vxm_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vxm_nxv4i16_nxv4i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 4 x i16> @llvm.riscv.vadc.nxv4i16.i16(
+    <vscale x 4 x i16> %0,
+    i16 %1,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vadc.nxv8i16.i16(
+  <vscale x 8 x i16>,
+  i16,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i16> @intrinsic_vadc_vxm_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vxm_nxv8i16_nxv8i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 8 x i16> @llvm.riscv.vadc.nxv8i16.i16(
+    <vscale x 8 x i16> %0,
+    i16 %1,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vadc.nxv16i16.i16(
+  <vscale x 16 x i16>,
+  i16,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i16> @intrinsic_vadc_vxm_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vxm_nxv16i16_nxv16i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 16 x i16> @llvm.riscv.vadc.nxv16i16.i16(
+    <vscale x 16 x i16> %0,
+    i16 %1,
+    <vscale x 16 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vadc.nxv32i16.i16(
+  <vscale x 32 x i16>,
+  i16,
+  <vscale x 32 x i1>,
+  i64);
+
+define <vscale x 32 x i16> @intrinsic_vadc_vxm_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vxm_nxv32i16_nxv32i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 32 x i16> @llvm.riscv.vadc.nxv32i16.i16(
+    <vscale x 32 x i16> %0,
+    i16 %1,
+    <vscale x 32 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.i32(
+  <vscale x 1 x i32>,
+  i32,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i32> @intrinsic_vadc_vxm_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vxm_nxv1i32_nxv1i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.i32(
+    <vscale x 1 x i32> %0,
+    i32 %1,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vadc.nxv2i32.i32(
+  <vscale x 2 x i32>,
+  i32,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i32> @intrinsic_vadc_vxm_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vxm_nxv2i32_nxv2i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 2 x i32> @llvm.riscv.vadc.nxv2i32.i32(
+    <vscale x 2 x i32> %0,
+    i32 %1,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vadc.nxv4i32.i32(
+  <vscale x 4 x i32>,
+  i32,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i32> @intrinsic_vadc_vxm_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vxm_nxv4i32_nxv4i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 4 x i32> @llvm.riscv.vadc.nxv4i32.i32(
+    <vscale x 4 x i32> %0,
+    i32 %1,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vadc.nxv8i32.i32(
+  <vscale x 8 x i32>,
+  i32,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i32> @intrinsic_vadc_vxm_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vxm_nxv8i32_nxv8i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 8 x i32> @llvm.riscv.vadc.nxv8i32.i32(
+    <vscale x 8 x i32> %0,
+    i32 %1,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vadc.nxv16i32.i32(
+  <vscale x 16 x i32>,
+  i32,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i32> @intrinsic_vadc_vxm_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vxm_nxv16i32_nxv16i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 16 x i32> @llvm.riscv.vadc.nxv16i32.i32(
+    <vscale x 16 x i32> %0,
+    i32 %1,
+    <vscale x 16 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vadc.nxv1i64.i64(
+  <vscale x 1 x i64>,
+  i64,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i64> @intrinsic_vadc_vxm_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vxm_nxv1i64_nxv1i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 1 x i64> @llvm.riscv.vadc.nxv1i64.i64(
+    <vscale x 1 x i64> %0,
+    i64 %1,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vadc.nxv2i64.i64(
+  <vscale x 2 x i64>,
+  i64,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i64> @intrinsic_vadc_vxm_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vxm_nxv2i64_nxv2i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 2 x i64> @llvm.riscv.vadc.nxv2i64.i64(
+    <vscale x 2 x i64> %0,
+    i64 %1,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vadc.nxv4i64.i64(
+  <vscale x 4 x i64>,
+  i64,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i64> @intrinsic_vadc_vxm_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vxm_nxv4i64_nxv4i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 4 x i64> @llvm.riscv.vadc.nxv4i64.i64(
+    <vscale x 4 x i64> %0,
+    i64 %1,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vadc.nxv8i64.i64(
+  <vscale x 8 x i64>,
+  i64,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i64> @intrinsic_vadc_vxm_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vxm_nxv8i64_nxv8i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 8 x i64> @llvm.riscv.vadc.nxv8i64.i64(
+    <vscale x 8 x i64> %0,
+    i64 %1,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i64> %a
+}
+
+define <vscale x 1 x i8> @intrinsic_vadc_vim_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vim_nxv1i8_nxv1i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.i8(
+    <vscale x 1 x i8> %0,
+    i8 9,
+    <vscale x 1 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_vadc_vim_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vim_nxv2i8_nxv2i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 2 x i8> @llvm.riscv.vadc.nxv2i8.i8(
+    <vscale x 2 x i8> %0,
+    i8 9,
+    <vscale x 2 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_vadc_vim_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vim_nxv4i8_nxv4i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 4 x i8> @llvm.riscv.vadc.nxv4i8.i8(
+    <vscale x 4 x i8> %0,
+    i8 9,
+    <vscale x 4 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vadc_vim_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vim_nxv8i8_nxv8i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 8 x i8> @llvm.riscv.vadc.nxv8i8.i8(
+    <vscale x 8 x i8> %0,
+    i8 9,
+    <vscale x 8 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_vadc_vim_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vim_nxv16i8_nxv16i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 16 x i8> @llvm.riscv.vadc.nxv16i8.i8(
+    <vscale x 16 x i8> %0,
+    i8 9,
+    <vscale x 16 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 32 x i8> @intrinsic_vadc_vim_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vim_nxv32i8_nxv32i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 32 x i8> @llvm.riscv.vadc.nxv32i8.i8(
+    <vscale x 32 x i8> %0,
+    i8 9,
+    <vscale x 32 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 32 x i8> %a
+}
+
+define <vscale x 64 x i8> @intrinsic_vadc_vim_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vim_nxv64i8_nxv64i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 64 x i8> @llvm.riscv.vadc.nxv64i8.i8(
+    <vscale x 64 x i8> %0,
+    i8 9,
+    <vscale x 64 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 64 x i8> %a
+}
+
+define <vscale x 1 x i16> @intrinsic_vadc_vim_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vim_nxv1i16_nxv1i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 1 x i16> @llvm.riscv.vadc.nxv1i16.i16(
+    <vscale x 1 x i16> %0,
+    i16 9,
+    <vscale x 1 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 2 x i16> @intrinsic_vadc_vim_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vim_nxv2i16_nxv2i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 2 x i16> @llvm.riscv.vadc.nxv2i16.i16(
+    <vscale x 2 x i16> %0,
+    i16 9,
+    <vscale x 2 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 4 x i16> @intrinsic_vadc_vim_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vim_nxv4i16_nxv4i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 4 x i16> @llvm.riscv.vadc.nxv4i16.i16(
+    <vscale x 4 x i16> %0,
+    i16 9,
+    <vscale x 4 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 8 x i16> @intrinsic_vadc_vim_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vim_nxv8i16_nxv8i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 8 x i16> @llvm.riscv.vadc.nxv8i16.i16(
+    <vscale x 8 x i16> %0,
+    i16 9,
+    <vscale x 8 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 16 x i16> @intrinsic_vadc_vim_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vim_nxv16i16_nxv16i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 16 x i16> @llvm.riscv.vadc.nxv16i16.i16(
+    <vscale x 16 x i16> %0,
+    i16 9,
+    <vscale x 16 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 32 x i16> @intrinsic_vadc_vim_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vim_nxv32i16_nxv32i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 32 x i16> @llvm.riscv.vadc.nxv32i16.i16(
+    <vscale x 32 x i16> %0,
+    i16 9,
+    <vscale x 32 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 32 x i16> %a
+}
+
+define <vscale x 1 x i32> @intrinsic_vadc_vim_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vim_nxv1i32_nxv1i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.i32(
+    <vscale x 1 x i32> %0,
+    i32 9,
+    <vscale x 1 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 2 x i32> @intrinsic_vadc_vim_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vim_nxv2i32_nxv2i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 2 x i32> @llvm.riscv.vadc.nxv2i32.i32(
+    <vscale x 2 x i32> %0,
+    i32 9,
+    <vscale x 2 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 4 x i32> @intrinsic_vadc_vim_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vim_nxv4i32_nxv4i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 4 x i32> @llvm.riscv.vadc.nxv4i32.i32(
+    <vscale x 4 x i32> %0,
+    i32 9,
+    <vscale x 4 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 8 x i32> @intrinsic_vadc_vim_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vim_nxv8i32_nxv8i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 8 x i32> @llvm.riscv.vadc.nxv8i32.i32(
+    <vscale x 8 x i32> %0,
+    i32 9,
+    <vscale x 8 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 16 x i32> @intrinsic_vadc_vim_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vim_nxv16i32_nxv16i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 16 x i32> @llvm.riscv.vadc.nxv16i32.i32(
+    <vscale x 16 x i32> %0,
+    i32 9,
+    <vscale x 16 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 16 x i32> %a
+}
+
+define <vscale x 1 x i64> @intrinsic_vadc_vim_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vim_nxv1i64_nxv1i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 1 x i64> @llvm.riscv.vadc.nxv1i64.i64(
+    <vscale x 1 x i64> %0,
+    i64 9,
+    <vscale x 1 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 1 x i64> %a
+}
+
+define <vscale x 2 x i64> @intrinsic_vadc_vim_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vim_nxv2i64_nxv2i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 2 x i64> @llvm.riscv.vadc.nxv2i64.i64(
+    <vscale x 2 x i64> %0,
+    i64 9,
+    <vscale x 2 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 2 x i64> %a
+}
+
+define <vscale x 4 x i64> @intrinsic_vadc_vim_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vim_nxv4i64_nxv4i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 4 x i64> @llvm.riscv.vadc.nxv4i64.i64(
+    <vscale x 4 x i64> %0,
+    i64 9,
+    <vscale x 4 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 4 x i64> %a
+}
+
+define <vscale x 8 x i64> @intrinsic_vadc_vim_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vim_nxv8i64_nxv8i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 8 x i64> @llvm.riscv.vadc.nxv8i64.i64(
+    <vscale x 8 x i64> %0,
+    i64 9,
+    <vscale x 8 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 8 x i64> %a
+}

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vmadc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmadc-rv32.ll
new file mode 100644
index 000000000000..22efd35fa56e
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vmadc-rv32.ll
@@ -0,0 +1,883 @@
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i1.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  i32);
+
+define <vscale x 1 x i1> @intrinsic_vmadc_vv_nxv1i1_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vv_nxv1i1_nxv1i8_nxv1i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i1.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i1.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  i32);
+
+define <vscale x 2 x i1> @intrinsic_vmadc_vv_nxv2i1_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vv_nxv2i1_nxv2i8_nxv2i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i1.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i1.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  i32);
+
+define <vscale x 4 x i1> @intrinsic_vmadc_vv_nxv4i1_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vv_nxv4i1_nxv4i8_nxv4i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i1.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i1.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  i32);
+
+define <vscale x 8 x i1> @intrinsic_vmadc_vv_nxv8i1_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vv_nxv8i1_nxv8i8_nxv8i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i1.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i1.nxv16i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  i32);
+
+define <vscale x 16 x i1> @intrinsic_vmadc_vv_nxv16i1_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vv_nxv16i1_nxv16i8_nxv16i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i1.nxv16i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i1.nxv32i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  i32);
+
+define <vscale x 32 x i1> @intrinsic_vmadc_vv_nxv32i1_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vv_nxv32i1_nxv32i8_nxv32i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i1.nxv32i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i1.nxv64i8(
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  i32);
+
+define <vscale x 64 x i1> @intrinsic_vmadc_vv_nxv64i1_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vv_nxv64i1_nxv64i8_nxv64i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i1.nxv64i8(
+    <vscale x 64 x i8> %0,
+    <vscale x 64 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 64 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i1.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  i32);
+
+define <vscale x 1 x i1> @intrinsic_vmadc_vv_nxv1i1_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vv_nxv1i1_nxv1i16_nxv1i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i1.nxv1i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    i32 %2)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i1.nxv2i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  i32);
+
+define <vscale x 2 x i1> @intrinsic_vmadc_vv_nxv2i1_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vv_nxv2i1_nxv2i16_nxv2i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i1.nxv2i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %1,
+    i32 %2)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i1.nxv4i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  i32);
+
+define <vscale x 4 x i1> @intrinsic_vmadc_vv_nxv4i1_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vv_nxv4i1_nxv4i16_nxv4i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i1.nxv4i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %1,
+    i32 %2)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i1.nxv8i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  i32);
+
+define <vscale x 8 x i1> @intrinsic_vmadc_vv_nxv8i1_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vv_nxv8i1_nxv8i16_nxv8i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i1.nxv8i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %1,
+    i32 %2)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i1.nxv16i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  i32);
+
+define <vscale x 16 x i1> @intrinsic_vmadc_vv_nxv16i1_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vv_nxv16i1_nxv16i16_nxv16i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i1.nxv16i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %1,
+    i32 %2)
+
+  ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i1.nxv32i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  i32);
+
+define <vscale x 32 x i1> @intrinsic_vmadc_vv_nxv32i1_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vv_nxv32i1_nxv32i16_nxv32i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i1.nxv32i16(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16> %1,
+    i32 %2)
+
+  ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i1.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  i32);
+
+define <vscale x 1 x i1> @intrinsic_vmadc_vv_nxv1i1_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vv_nxv1i1_nxv1i32_nxv1i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i1.nxv1i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    i32 %2)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i1.nxv2i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  i32);
+
+define <vscale x 2 x i1> @intrinsic_vmadc_vv_nxv2i1_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vv_nxv2i1_nxv2i32_nxv2i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i1.nxv2i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    i32 %2)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i1.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  i32);
+
+define <vscale x 4 x i1> @intrinsic_vmadc_vv_nxv4i1_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vv_nxv4i1_nxv4i32_nxv4i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i1.nxv4i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    i32 %2)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i1.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  i32);
+
+define <vscale x 8 x i1> @intrinsic_vmadc_vv_nxv8i1_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vv_nxv8i1_nxv8i32_nxv8i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i1.nxv8i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    i32 %2)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i1.nxv16i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  i32);
+
+define <vscale x 16 x i1> @intrinsic_vmadc_vv_nxv16i1_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vv_nxv16i1_nxv16i32_nxv16i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i1.nxv16i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    i32 %2)
+
+  ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i1.i8(
+  <vscale x 1 x i8>,
+  i8,
+  i32);
+
+define <vscale x 1 x i1> @intrinsic_vmadc_vx_nxv1i1_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vx_nxv1i1_nxv1i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i1.i8(
+    <vscale x 1 x i8> %0,
+    i8 %1,
+    i32 %2)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i1.i8(
+  <vscale x 2 x i8>,
+  i8,
+  i32);
+
+define <vscale x 2 x i1> @intrinsic_vmadc_vx_nxv2i1_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vx_nxv2i1_nxv2i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i1.i8(
+    <vscale x 2 x i8> %0,
+    i8 %1,
+    i32 %2)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i1.i8(
+  <vscale x 4 x i8>,
+  i8,
+  i32);
+
+define <vscale x 4 x i1> @intrinsic_vmadc_vx_nxv4i1_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vx_nxv4i1_nxv4i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i1.i8(
+    <vscale x 4 x i8> %0,
+    i8 %1,
+    i32 %2)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i1.i8(
+  <vscale x 8 x i8>,
+  i8,
+  i32);
+
+define <vscale x 8 x i1> @intrinsic_vmadc_vx_nxv8i1_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vx_nxv8i1_nxv8i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i1.i8(
+    <vscale x 8 x i8> %0,
+    i8 %1,
+    i32 %2)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i1.i8(
+  <vscale x 16 x i8>,
+  i8,
+  i32);
+
+define <vscale x 16 x i1> @intrinsic_vmadc_vx_nxv16i1_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vx_nxv16i1_nxv16i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i1.i8(
+    <vscale x 16 x i8> %0,
+    i8 %1,
+    i32 %2)
+
+  ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i1.i8(
+  <vscale x 32 x i8>,
+  i8,
+  i32);
+
+define <vscale x 32 x i1> @intrinsic_vmadc_vx_nxv32i1_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vx_nxv32i1_nxv32i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i1.i8(
+    <vscale x 32 x i8> %0,
+    i8 %1,
+    i32 %2)
+
+  ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i1.i8(
+  <vscale x 64 x i8>,
+  i8,
+  i32);
+
+define <vscale x 64 x i1> @intrinsic_vmadc_vx_nxv64i1_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vx_nxv64i1_nxv64i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i1.i8(
+    <vscale x 64 x i8> %0,
+    i8 %1,
+    i32 %2)
+
+  ret <vscale x 64 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i1.i16(
+  <vscale x 1 x i16>,
+  i16,
+  i32);
+
+define <vscale x 1 x i1> @intrinsic_vmadc_vx_nxv1i1_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vx_nxv1i1_nxv1i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i1.i16(
+    <vscale x 1 x i16> %0,
+    i16 %1,
+    i32 %2)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i1.i16(
+  <vscale x 2 x i16>,
+  i16,
+  i32);
+
+define <vscale x 2 x i1> @intrinsic_vmadc_vx_nxv2i1_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vx_nxv2i1_nxv2i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i1.i16(
+    <vscale x 2 x i16> %0,
+    i16 %1,
+    i32 %2)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i1.i16(
+  <vscale x 4 x i16>,
+  i16,
+  i32);
+
+define <vscale x 4 x i1> @intrinsic_vmadc_vx_nxv4i1_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vx_nxv4i1_nxv4i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i1.i16(
+    <vscale x 4 x i16> %0,
+    i16 %1,
+    i32 %2)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i1.i16(
+  <vscale x 8 x i16>,
+  i16,
+  i32);
+
+define <vscale x 8 x i1> @intrinsic_vmadc_vx_nxv8i1_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vx_nxv8i1_nxv8i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i1.i16(
+    <vscale x 8 x i16> %0,
+    i16 %1,
+    i32 %2)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i1.i16(
+  <vscale x 16 x i16>,
+  i16,
+  i32);
+
+define <vscale x 16 x i1> @intrinsic_vmadc_vx_nxv16i1_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vx_nxv16i1_nxv16i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i1.i16(
+    <vscale x 16 x i16> %0,
+    i16 %1,
+    i32 %2)
+
+  ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i1.i16(
+  <vscale x 32 x i16>,
+  i16,
+  i32);
+
+define <vscale x 32 x i1> @intrinsic_vmadc_vx_nxv32i1_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vx_nxv32i1_nxv32i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i1.i16(
+    <vscale x 32 x i16> %0,
+    i16 %1,
+    i32 %2)
+
+  ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i1.i32(
+  <vscale x 1 x i32>,
+  i32,
+  i32);
+
+define <vscale x 1 x i1> @intrinsic_vmadc_vx_nxv1i1_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vx_nxv1i1_nxv1i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i1.i32(
+    <vscale x 1 x i32> %0,
+    i32 %1,
+    i32 %2)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i1.i32(
+  <vscale x 2 x i32>,
+  i32,
+  i32);
+
+define <vscale x 2 x i1> @intrinsic_vmadc_vx_nxv2i1_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vx_nxv2i1_nxv2i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i1.i32(
+    <vscale x 2 x i32> %0,
+    i32 %1,
+    i32 %2)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i1.i32(
+  <vscale x 4 x i32>,
+  i32,
+  i32);
+
+define <vscale x 4 x i1> @intrinsic_vmadc_vx_nxv4i1_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vx_nxv4i1_nxv4i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i1.i32(
+    <vscale x 4 x i32> %0,
+    i32 %1,
+    i32 %2)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i1.i32(
+  <vscale x 8 x i32>,
+  i32,
+  i32);
+
+define <vscale x 8 x i1> @intrinsic_vmadc_vx_nxv8i1_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vx_nxv8i1_nxv8i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i1.i32(
+    <vscale x 8 x i32> %0,
+    i32 %1,
+    i32 %2)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i1.i32(
+  <vscale x 16 x i32>,
+  i32,
+  i32);
+
+define <vscale x 16 x i1> @intrinsic_vmadc_vx_nxv16i1_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vx_nxv16i1_nxv16i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i1.i32(
+    <vscale x 16 x i32> %0,
+    i32 %1,
+    i32 %2)
+
+  ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmadc_vi_nxv1i1_nxv1i8_i8(<vscale x 1 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vi_nxv1i1_nxv1i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i1.i8(
+    <vscale x 1 x i8> %0,
+    i8 9,
+    i32 %1)
+
+  ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmadc_vi_nxv2i1_nxv2i8_i8(<vscale x 2 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vi_nxv2i1_nxv2i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i1.i8(
+    <vscale x 2 x i8> %0,
+    i8 9,
+    i32 %1)
+
+  ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmadc_vi_nxv4i1_nxv4i8_i8(<vscale x 4 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vi_nxv4i1_nxv4i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i1.i8(
+    <vscale x 4 x i8> %0,
+    i8 9,
+    i32 %1)
+
+  ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmadc_vi_nxv8i1_nxv8i8_i8(<vscale x 8 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vi_nxv8i1_nxv8i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i1.i8(
+    <vscale x 8 x i8> %0,
+    i8 9,
+    i32 %1)
+
+  ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 16 x i1> @intrinsic_vmadc_vi_nxv16i1_nxv16i8_i8(<vscale x 16 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vi_nxv16i1_nxv16i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i1.i8(
+    <vscale x 16 x i8> %0,
+    i8 9,
+    i32 %1)
+
+  ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 32 x i1> @intrinsic_vmadc_vi_nxv32i1_nxv32i8_i8(<vscale x 32 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vi_nxv32i1_nxv32i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i1.i8(
+    <vscale x 32 x i8> %0,
+    i8 9,
+    i32 %1)
+
+  ret <vscale x 32 x i1> %a
+}
+
+define <vscale x 64 x i1> @intrinsic_vmadc_vi_nxv64i1_nxv64i8_i8(<vscale x 64 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vi_nxv64i1_nxv64i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i1.i8(
+    <vscale x 64 x i8> %0,
+    i8 9,
+    i32 %1)
+
+  ret <vscale x 64 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmadc_vi_nxv1i1_nxv1i16_i16(<vscale x 1 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vi_nxv1i1_nxv1i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i1.i16(
+    <vscale x 1 x i16> %0,
+    i16 9,
+    i32 %1)
+
+  ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmadc_vi_nxv2i1_nxv2i16_i16(<vscale x 2 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vi_nxv2i1_nxv2i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i1.i16(
+    <vscale x 2 x i16> %0,
+    i16 9,
+    i32 %1)
+
+  ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmadc_vi_nxv4i1_nxv4i16_i16(<vscale x 4 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vi_nxv4i1_nxv4i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i1.i16(
+    <vscale x 4 x i16> %0,
+    i16 9,
+    i32 %1)
+
+  ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmadc_vi_nxv8i1_nxv8i16_i16(<vscale x 8 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vi_nxv8i1_nxv8i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i1.i16(
+    <vscale x 8 x i16> %0,
+    i16 9,
+    i32 %1)
+
+  ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 16 x i1> @intrinsic_vmadc_vi_nxv16i1_nxv16i16_i16(<vscale x 16 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vi_nxv16i1_nxv16i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i1.i16(
+    <vscale x 16 x i16> %0,
+    i16 9,
+    i32 %1)
+
+  ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 32 x i1> @intrinsic_vmadc_vi_nxv32i1_nxv32i16_i16(<vscale x 32 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vi_nxv32i1_nxv32i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i1.i16(
+    <vscale x 32 x i16> %0,
+    i16 9,
+    i32 %1)
+
+  ret <vscale x 32 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmadc_vi_nxv1i1_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vi_nxv1i1_nxv1i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i1.i32(
+    <vscale x 1 x i32> %0,
+    i32 9,
+    i32 %1)
+
+  ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmadc_vi_nxv2i1_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vi_nxv2i1_nxv2i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i1.i32(
+    <vscale x 2 x i32> %0,
+    i32 9,
+    i32 %1)
+
+  ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmadc_vi_nxv4i1_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vi_nxv4i1_nxv4i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i1.i32(
+    <vscale x 4 x i32> %0,
+    i32 9,
+    i32 %1)
+
+  ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmadc_vi_nxv8i1_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vi_nxv8i1_nxv8i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i1.i32(
+    <vscale x 8 x i32> %0,
+    i32 9,
+    i32 %1)
+
+  ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 16 x i1> @intrinsic_vmadc_vi_nxv16i1_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vi_nxv16i1_nxv16i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i1.i32(
+    <vscale x 16 x i32> %0,
+    i32 9,
+    i32 %1)
+
+  ret <vscale x 16 x i1> %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmadc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmadc-rv64.ll
new file mode 100644
index 000000000000..9b71d847a200
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vmadc-rv64.ll
@@ -0,0 +1,1079 @@
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i1.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  i64);
+
+define <vscale x 1 x i1> @intrinsic_vmadc_vv_nxv1i1_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vv_nxv1i1_nxv1i8_nxv1i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i1.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i1.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  i64);
+
+define <vscale x 2 x i1> @intrinsic_vmadc_vv_nxv2i1_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vv_nxv2i1_nxv2i8_nxv2i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i1.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i1.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  i64);
+
+define <vscale x 4 x i1> @intrinsic_vmadc_vv_nxv4i1_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vv_nxv4i1_nxv4i8_nxv4i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i1.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i1.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  i64);
+
+define <vscale x 8 x i1> @intrinsic_vmadc_vv_nxv8i1_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vv_nxv8i1_nxv8i8_nxv8i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i1.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i1.nxv16i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  i64);
+
+define <vscale x 16 x i1> @intrinsic_vmadc_vv_nxv16i1_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vv_nxv16i1_nxv16i8_nxv16i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i1.nxv16i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i1.nxv32i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  i64);
+
+define <vscale x 32 x i1> @intrinsic_vmadc_vv_nxv32i1_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vv_nxv32i1_nxv32i8_nxv32i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i1.nxv32i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i1.nxv64i8(
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  i64);
+
+define <vscale x 64 x i1> @intrinsic_vmadc_vv_nxv64i1_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vv_nxv64i1_nxv64i8_nxv64i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i1.nxv64i8(
+    <vscale x 64 x i8> %0,
+    <vscale x 64 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 64 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i1.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  i64);
+
+define <vscale x 1 x i1> @intrinsic_vmadc_vv_nxv1i1_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vv_nxv1i1_nxv1i16_nxv1i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i1.nxv1i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    i64 %2)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i1.nxv2i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  i64);
+
+define <vscale x 2 x i1> @intrinsic_vmadc_vv_nxv2i1_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vv_nxv2i1_nxv2i16_nxv2i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i1.nxv2i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %1,
+    i64 %2)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i1.nxv4i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  i64);
+
+define <vscale x 4 x i1> @intrinsic_vmadc_vv_nxv4i1_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vv_nxv4i1_nxv4i16_nxv4i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i1.nxv4i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %1,
+    i64 %2)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i1.nxv8i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  i64);
+
+define <vscale x 8 x i1> @intrinsic_vmadc_vv_nxv8i1_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vv_nxv8i1_nxv8i16_nxv8i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i1.nxv8i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %1,
+    i64 %2)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i1.nxv16i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  i64);
+
+define <vscale x 16 x i1> @intrinsic_vmadc_vv_nxv16i1_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vv_nxv16i1_nxv16i16_nxv16i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i1.nxv16i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %1,
+    i64 %2)
+
+  ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i1.nxv32i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  i64);
+
+define <vscale x 32 x i1> @intrinsic_vmadc_vv_nxv32i1_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vv_nxv32i1_nxv32i16_nxv32i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i1.nxv32i16(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16> %1,
+    i64 %2)
+
+  ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i1.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  i64);
+
+define <vscale x 1 x i1> @intrinsic_vmadc_vv_nxv1i1_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vv_nxv1i1_nxv1i32_nxv1i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i1.nxv1i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    i64 %2)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i1.nxv2i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  i64);
+
+define <vscale x 2 x i1> @intrinsic_vmadc_vv_nxv2i1_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vv_nxv2i1_nxv2i32_nxv2i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i1.nxv2i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    i64 %2)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i1.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  i64);
+
+define <vscale x 4 x i1> @intrinsic_vmadc_vv_nxv4i1_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vv_nxv4i1_nxv4i32_nxv4i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i1.nxv4i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    i64 %2)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i1.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  i64);
+
+define <vscale x 8 x i1> @intrinsic_vmadc_vv_nxv8i1_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vv_nxv8i1_nxv8i32_nxv8i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i1.nxv8i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    i64 %2)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i1.nxv16i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  i64);
+
+define <vscale x 16 x i1> @intrinsic_vmadc_vv_nxv16i1_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vv_nxv16i1_nxv16i32_nxv16i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i1.nxv16i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    i64 %2)
+
+  ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i1.nxv1i64(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>,
+  i64);
+
+define <vscale x 1 x i1> @intrinsic_vmadc_vv_nxv1i1_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vv_nxv1i1_nxv1i64_nxv1i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i1.nxv1i64(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64> %1,
+    i64 %2)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i1.nxv2i64(
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>,
+  i64);
+
+define <vscale x 2 x i1> @intrinsic_vmadc_vv_nxv2i1_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vv_nxv2i1_nxv2i64_nxv2i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i1.nxv2i64(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64> %1,
+    i64 %2)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i1.nxv4i64(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  i64);
+
+define <vscale x 4 x i1> @intrinsic_vmadc_vv_nxv4i1_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vv_nxv4i1_nxv4i64_nxv4i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i1.nxv4i64(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64> %1,
+    i64 %2)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i1.nxv8i64(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>,
+  i64);
+
+define <vscale x 8 x i1> @intrinsic_vmadc_vv_nxv8i1_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vv_nxv8i1_nxv8i64_nxv8i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i1.nxv8i64(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i64> %1,
+    i64 %2)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i1.i8(
+  <vscale x 1 x i8>,
+  i8,
+  i64);
+
+define <vscale x 1 x i1> @intrinsic_vmadc_vx_nxv1i1_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vx_nxv1i1_nxv1i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i1.i8(
+    <vscale x 1 x i8> %0,
+    i8 %1,
+    i64 %2)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i1.i8(
+  <vscale x 2 x i8>,
+  i8,
+  i64);
+
+define <vscale x 2 x i1> @intrinsic_vmadc_vx_nxv2i1_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vx_nxv2i1_nxv2i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i1.i8(
+    <vscale x 2 x i8> %0,
+    i8 %1,
+    i64 %2)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i1.i8(
+  <vscale x 4 x i8>,
+  i8,
+  i64);
+
+define <vscale x 4 x i1> @intrinsic_vmadc_vx_nxv4i1_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vx_nxv4i1_nxv4i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i1.i8(
+    <vscale x 4 x i8> %0,
+    i8 %1,
+    i64 %2)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i1.i8(
+  <vscale x 8 x i8>,
+  i8,
+  i64);
+
+define <vscale x 8 x i1> @intrinsic_vmadc_vx_nxv8i1_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vx_nxv8i1_nxv8i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i1.i8(
+    <vscale x 8 x i8> %0,
+    i8 %1,
+    i64 %2)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i1.i8(
+  <vscale x 16 x i8>,
+  i8,
+  i64);
+
+define <vscale x 16 x i1> @intrinsic_vmadc_vx_nxv16i1_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vx_nxv16i1_nxv16i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i1.i8(
+    <vscale x 16 x i8> %0,
+    i8 %1,
+    i64 %2)
+
+  ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i1.i8(
+  <vscale x 32 x i8>,
+  i8,
+  i64);
+
+define <vscale x 32 x i1> @intrinsic_vmadc_vx_nxv32i1_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vx_nxv32i1_nxv32i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i1.i8(
+    <vscale x 32 x i8> %0,
+    i8 %1,
+    i64 %2)
+
+  ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i1.i8(
+  <vscale x 64 x i8>,
+  i8,
+  i64);
+
+define <vscale x 64 x i1> @intrinsic_vmadc_vx_nxv64i1_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vx_nxv64i1_nxv64i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i1.i8(
+    <vscale x 64 x i8> %0,
+    i8 %1,
+    i64 %2)
+
+  ret <vscale x 64 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i1.i16(
+  <vscale x 1 x i16>,
+  i16,
+  i64);
+
+define <vscale x 1 x i1> @intrinsic_vmadc_vx_nxv1i1_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vx_nxv1i1_nxv1i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i1.i16(
+    <vscale x 1 x i16> %0,
+    i16 %1,
+    i64 %2)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i1.i16(
+  <vscale x 2 x i16>,
+  i16,
+  i64);
+
+define <vscale x 2 x i1> @intrinsic_vmadc_vx_nxv2i1_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vx_nxv2i1_nxv2i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i1.i16(
+    <vscale x 2 x i16> %0,
+    i16 %1,
+    i64 %2)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i1.i16(
+  <vscale x 4 x i16>,
+  i16,
+  i64);
+
+define <vscale x 4 x i1> @intrinsic_vmadc_vx_nxv4i1_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vx_nxv4i1_nxv4i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i1.i16(
+    <vscale x 4 x i16> %0,
+    i16 %1,
+    i64 %2)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i1.i16(
+  <vscale x 8 x i16>,
+  i16,
+  i64);
+
+define <vscale x 8 x i1> @intrinsic_vmadc_vx_nxv8i1_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vx_nxv8i1_nxv8i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i1.i16(
+    <vscale x 8 x i16> %0,
+    i16 %1,
+    i64 %2)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i1.i16(
+  <vscale x 16 x i16>,
+  i16,
+  i64);
+
+define <vscale x 16 x i1> @intrinsic_vmadc_vx_nxv16i1_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vx_nxv16i1_nxv16i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i1.i16(
+    <vscale x 16 x i16> %0,
+    i16 %1,
+    i64 %2)
+
+  ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i1.i16(
+  <vscale x 32 x i16>,
+  i16,
+  i64);
+
+define <vscale x 32 x i1> @intrinsic_vmadc_vx_nxv32i1_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vx_nxv32i1_nxv32i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i1.i16(
+    <vscale x 32 x i16> %0,
+    i16 %1,
+    i64 %2)
+
+  ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i1.i32(
+  <vscale x 1 x i32>,
+  i32,
+  i64);
+
+define <vscale x 1 x i1> @intrinsic_vmadc_vx_nxv1i1_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vx_nxv1i1_nxv1i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i1.i32(
+    <vscale x 1 x i32> %0,
+    i32 %1,
+    i64 %2)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i1.i32(
+  <vscale x 2 x i32>,
+  i32,
+  i64);
+
+define <vscale x 2 x i1> @intrinsic_vmadc_vx_nxv2i1_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vx_nxv2i1_nxv2i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i1.i32(
+    <vscale x 2 x i32> %0,
+    i32 %1,
+    i64 %2)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i1.i32(
+  <vscale x 4 x i32>,
+  i32,
+  i64);
+
+define <vscale x 4 x i1> @intrinsic_vmadc_vx_nxv4i1_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vx_nxv4i1_nxv4i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i1.i32(
+    <vscale x 4 x i32> %0,
+    i32 %1,
+    i64 %2)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i1.i32(
+  <vscale x 8 x i32>,
+  i32,
+  i64);
+
+define <vscale x 8 x i1> @intrinsic_vmadc_vx_nxv8i1_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vx_nxv8i1_nxv8i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i1.i32(
+    <vscale x 8 x i32> %0,
+    i32 %1,
+    i64 %2)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i1.i32(
+  <vscale x 16 x i32>,
+  i32,
+  i64);
+
+define <vscale x 16 x i1> @intrinsic_vmadc_vx_nxv16i1_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vx_nxv16i1_nxv16i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i1.i32(
+    <vscale x 16 x i32> %0,
+    i32 %1,
+    i64 %2)
+
+  ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i1.i64(
+  <vscale x 1 x i64>,
+  i64,
+  i64);
+
+define <vscale x 1 x i1> @intrinsic_vmadc_vx_nxv1i1_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vx_nxv1i1_nxv1i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i1.i64(
+    <vscale x 1 x i64> %0,
+    i64 %1,
+    i64 %2)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i1.i64(
+  <vscale x 2 x i64>,
+  i64,
+  i64);
+
+define <vscale x 2 x i1> @intrinsic_vmadc_vx_nxv2i1_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vx_nxv2i1_nxv2i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i1.i64(
+    <vscale x 2 x i64> %0,
+    i64 %1,
+    i64 %2)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i1.i64(
+  <vscale x 4 x i64>,
+  i64,
+  i64);
+
+define <vscale x 4 x i1> @intrinsic_vmadc_vx_nxv4i1_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vx_nxv4i1_nxv4i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i1.i64(
+    <vscale x 4 x i64> %0,
+    i64 %1,
+    i64 %2)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i1.i64(
+  <vscale x 8 x i64>,
+  i64,
+  i64);
+
+define <vscale x 8 x i1> @intrinsic_vmadc_vx_nxv8i1_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vx_nxv8i1_nxv8i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i1.i64(
+    <vscale x 8 x i64> %0,
+    i64 %1,
+    i64 %2)
+
+  ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmadc_vi_nxv1i1_nxv1i8_i8(<vscale x 1 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vi_nxv1i1_nxv1i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i1.i8(
+    <vscale x 1 x i8> %0,
+    i8 9,
+    i64 %1)
+
+  ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmadc_vi_nxv2i1_nxv2i8_i8(<vscale x 2 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vi_nxv2i1_nxv2i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i1.i8(
+    <vscale x 2 x i8> %0,
+    i8 9,
+    i64 %1)
+
+  ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmadc_vi_nxv4i1_nxv4i8_i8(<vscale x 4 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vi_nxv4i1_nxv4i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i1.i8(
+    <vscale x 4 x i8> %0,
+    i8 9,
+    i64 %1)
+
+  ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmadc_vi_nxv8i1_nxv8i8_i8(<vscale x 8 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vi_nxv8i1_nxv8i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i1.i8(
+    <vscale x 8 x i8> %0,
+    i8 9,
+    i64 %1)
+
+  ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 16 x i1> @intrinsic_vmadc_vi_nxv16i1_nxv16i8_i8(<vscale x 16 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vi_nxv16i1_nxv16i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i1.i8(
+    <vscale x 16 x i8> %0,
+    i8 9,
+    i64 %1)
+
+  ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 32 x i1> @intrinsic_vmadc_vi_nxv32i1_nxv32i8_i8(<vscale x 32 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vi_nxv32i1_nxv32i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i1.i8(
+    <vscale x 32 x i8> %0,
+    i8 9,
+    i64 %1)
+
+  ret <vscale x 32 x i1> %a
+}
+
+define <vscale x 64 x i1> @intrinsic_vmadc_vi_nxv64i1_nxv64i8_i8(<vscale x 64 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vi_nxv64i1_nxv64i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i1.i8(
+    <vscale x 64 x i8> %0,
+    i8 9,
+    i64 %1)
+
+  ret <vscale x 64 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmadc_vi_nxv1i1_nxv1i16_i16(<vscale x 1 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vi_nxv1i1_nxv1i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i1.i16(
+    <vscale x 1 x i16> %0,
+    i16 9,
+    i64 %1)
+
+  ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmadc_vi_nxv2i1_nxv2i16_i16(<vscale x 2 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vi_nxv2i1_nxv2i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i1.i16(
+    <vscale x 2 x i16> %0,
+    i16 9,
+    i64 %1)
+
+  ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmadc_vi_nxv4i1_nxv4i16_i16(<vscale x 4 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vi_nxv4i1_nxv4i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i1.i16(
+    <vscale x 4 x i16> %0,
+    i16 9,
+    i64 %1)
+
+  ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmadc_vi_nxv8i1_nxv8i16_i16(<vscale x 8 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vi_nxv8i1_nxv8i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i1.i16(
+    <vscale x 8 x i16> %0,
+    i16 9,
+    i64 %1)
+
+  ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 16 x i1> @intrinsic_vmadc_vi_nxv16i1_nxv16i16_i16(<vscale x 16 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vi_nxv16i1_nxv16i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i1.i16(
+    <vscale x 16 x i16> %0,
+    i16 9,
+    i64 %1)
+
+  ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 32 x i1> @intrinsic_vmadc_vi_nxv32i1_nxv32i16_i16(<vscale x 32 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vi_nxv32i1_nxv32i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i1.i16(
+    <vscale x 32 x i16> %0,
+    i16 9,
+    i64 %1)
+
+  ret <vscale x 32 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmadc_vi_nxv1i1_nxv1i32_i32(<vscale x 1 x i32> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vi_nxv1i1_nxv1i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i1.i32(
+    <vscale x 1 x i32> %0,
+    i32 9,
+    i64 %1)
+
+  ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmadc_vi_nxv2i1_nxv2i32_i32(<vscale x 2 x i32> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vi_nxv2i1_nxv2i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i1.i32(
+    <vscale x 2 x i32> %0,
+    i32 9,
+    i64 %1)
+
+  ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmadc_vi_nxv4i1_nxv4i32_i32(<vscale x 4 x i32> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vi_nxv4i1_nxv4i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i1.i32(
+    <vscale x 4 x i32> %0,
+    i32 9,
+    i64 %1)
+
+  ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmadc_vi_nxv8i1_nxv8i32_i32(<vscale x 8 x i32> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vi_nxv8i1_nxv8i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i1.i32(
+    <vscale x 8 x i32> %0,
+    i32 9,
+    i64 %1)
+
+  ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 16 x i1> @intrinsic_vmadc_vi_nxv16i1_nxv16i32_i32(<vscale x 16 x i32> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vi_nxv16i1_nxv16i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i1.i32(
+    <vscale x 16 x i32> %0,
+    i32 9,
+    i64 %1)
+
+  ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmadc_vi_nxv1i1_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vi_nxv1i1_nxv1i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i1.i64(
+    <vscale x 1 x i64> %0,
+    i64 9,
+    i64 %1)
+
+  ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmadc_vi_nxv2i1_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vi_nxv2i1_nxv2i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i1.i64(
+    <vscale x 2 x i64> %0,
+    i64 9,
+    i64 %1)
+
+  ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmadc_vi_nxv4i1_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vi_nxv4i1_nxv4i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i1.i64(
+    <vscale x 4 x i64> %0,
+    i64 9,
+    i64 %1)
+
+  ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmadc_vi_nxv8i1_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vi_nxv8i1_nxv8i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i1.i64(
+    <vscale x 8 x i64> %0,
+    i64 9,
+    i64 %1)
+
+  ret <vscale x 8 x i1> %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv32.ll
new file mode 100644
index 000000000000..22cd8afb1335
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv32.ll
@@ -0,0 +1,973 @@
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i1.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i8_nxv1i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i1.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i1.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i8_nxv2i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i1.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i1.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i8_nxv4i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i1.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i1.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i8_nxv8i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i1.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i1.nxv16i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i1> @intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i8_nxv16i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i1.nxv16i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    <vscale x 16 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i1.nxv32i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  <vscale x 32 x i1>,
+  i32);
+
+define <vscale x 32 x i1> @intrinsic_vmadc.carry.in_vvm_nxv32i1_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv32i1_nxv32i8_nxv32i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i1.nxv32i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8> %1,
+    <vscale x 32 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 64 x i1> @llvm.riscv.vmadc.carry.in.nxv64i1.nxv64i8(
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  <vscale x 64 x i1>,
+  i32);
+
+define <vscale x 64 x i1> @intrinsic_vmadc.carry.in_vvm_nxv64i1_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv64i1_nxv64i8_nxv64i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 64 x i1> @llvm.riscv.vmadc.carry.in.nxv64i1.nxv64i8(
+    <vscale x 64 x i8> %0,
+    <vscale x 64 x i8> %1,
+    <vscale x 64 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 64 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i1.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i16_nxv1i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i1.nxv1i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i1.nxv2i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i16_nxv2i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i1.nxv2i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %1,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i1.nxv4i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i16_nxv4i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i1.nxv4i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %1,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i1.nxv8i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i16_nxv8i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i1.nxv8i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %1,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i1.nxv16i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i1> @intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i16_nxv16i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i1.nxv16i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %1,
+    <vscale x 16 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i1.nxv32i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  <vscale x 32 x i1>,
+  i32);
+
+define <vscale x 32 x i1> @intrinsic_vmadc.carry.in_vvm_nxv32i1_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv32i1_nxv32i16_nxv32i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i1.nxv32i16(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16> %1,
+    <vscale x 32 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i1.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i32_nxv1i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i1.nxv1i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i1.nxv2i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i32_nxv2i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i1.nxv2i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i1.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i32_nxv4i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i1.nxv4i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i1.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i32_nxv8i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i1.nxv8i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i1.nxv16i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i1> @intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i32_nxv16i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i1.nxv16i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    <vscale x 16 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i1.i8(
+  <vscale x 1 x i8>,
+  i8,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i1.i8(
+    <vscale x 1 x i8> %0,
+    i8 %1,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i1.i8(
+  <vscale x 2 x i8>,
+  i8,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i1.i8(
+    <vscale x 2 x i8> %0,
+    i8 %1,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i1.i8(
+  <vscale x 4 x i8>,
+  i8,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i1.i8(
+    <vscale x 4 x i8> %0,
+    i8 %1,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i1.i8(
+  <vscale x 8 x i8>,
+  i8,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i1.i8(
+    <vscale x 8 x i8> %0,
+    i8 %1,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i1.i8(
+  <vscale x 16 x i8>,
+  i8,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i1> @intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i1.i8(
+    <vscale x 16 x i8> %0,
+    i8 %1,
+    <vscale x 16 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i1.i8(
+  <vscale x 32 x i8>,
+  i8,
+  <vscale x 32 x i1>,
+  i32);
+
+define <vscale x 32 x i1> @intrinsic_vmadc.carry.in_vxm_nxv32i1_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv32i1_nxv32i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i1.i8(
+    <vscale x 32 x i8> %0,
+    i8 %1,
+    <vscale x 32 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 64 x i1> @llvm.riscv.vmadc.carry.in.nxv64i1.i8(
+  <vscale x 64 x i8>,
+  i8,
+  <vscale x 64 x i1>,
+  i32);
+
+define <vscale x 64 x i1> @intrinsic_vmadc.carry.in_vxm_nxv64i1_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, <vscale x 64 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv64i1_nxv64i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 64 x i1> @llvm.riscv.vmadc.carry.in.nxv64i1.i8(
+    <vscale x 64 x i8> %0,
+    i8 %1,
+    <vscale x 64 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 64 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i1.i16(
+  <vscale x 1 x i16>,
+  i16,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i1.i16(
+    <vscale x 1 x i16> %0,
+    i16 %1,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i1.i16(
+  <vscale x 2 x i16>,
+  i16,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i1.i16(
+    <vscale x 2 x i16> %0,
+    i16 %1,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i1.i16(
+  <vscale x 4 x i16>,
+  i16,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i1.i16(
+    <vscale x 4 x i16> %0,
+    i16 %1,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i1.i16(
+  <vscale x 8 x i16>,
+  i16,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i1.i16(
+    <vscale x 8 x i16> %0,
+    i16 %1,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i1.i16(
+  <vscale x 16 x i16>,
+  i16,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i1> @intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i1.i16(
+    <vscale x 16 x i16> %0,
+    i16 %1,
+    <vscale x 16 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i1.i16(
+  <vscale x 32 x i16>,
+  i16,
+  <vscale x 32 x i1>,
+  i32);
+
+define <vscale x 32 x i1> @intrinsic_vmadc.carry.in_vxm_nxv32i1_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv32i1_nxv32i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i1.i16(
+    <vscale x 32 x i16> %0,
+    i16 %1,
+    <vscale x 32 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i1.i32(
+  <vscale x 1 x i32>,
+  i32,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i1.i32(
+    <vscale x 1 x i32> %0,
+    i32 %1,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i1.i32(
+  <vscale x 2 x i32>,
+  i32,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i1.i32(
+    <vscale x 2 x i32> %0,
+    i32 %1,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i1.i32(
+  <vscale x 4 x i32>,
+  i32,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i1.i32(
+    <vscale x 4 x i32> %0,
+    i32 %1,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i1.i32(
+  <vscale x 8 x i32>,
+  i32,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i1.i32(
+    <vscale x 8 x i32> %0,
+    i32 %1,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i1.i32(
+  <vscale x 16 x i32>,
+  i32,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i1> @intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i1.i32(
+    <vscale x 16 x i32> %0,
+    i32 %1,
+    <vscale x 16 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i1.i8(
+    <vscale x 1 x i8> %0,
+    i8 9,
+    <vscale x 1 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i1.i8(
+    <vscale x 2 x i8> %0,
+    i8 9,
+    <vscale x 2 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i1.i8(
+    <vscale x 4 x i8> %0,
+    i8 9,
+    <vscale x 4 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i1.i8(
+    <vscale x 8 x i8> %0,
+    i8 9,
+    <vscale x 8 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 16 x i1> @intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i1.i8(
+    <vscale x 16 x i8> %0,
+    i8 9,
+    <vscale x 16 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 32 x i1> @intrinsic_vmadc.carry.in_vim_nxv32i1_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv32i1_nxv32i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i1.i8(
+    <vscale x 32 x i8> %0,
+    i8 9,
+    <vscale x 32 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 32 x i1> %a
+}
+
+define <vscale x 64 x i1> @intrinsic_vmadc.carry.in_vim_nxv64i1_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv64i1_nxv64i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 64 x i1> @llvm.riscv.vmadc.carry.in.nxv64i1.i8(
+    <vscale x 64 x i8> %0,
+    i8 9,
+    <vscale x 64 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 64 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i1.i16(
+    <vscale x 1 x i16> %0,
+    i16 9,
+    <vscale x 1 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i1.i16(
+    <vscale x 2 x i16> %0,
+    i16 9,
+    <vscale x 2 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i1.i16(
+    <vscale x 4 x i16> %0,
+    i16 9,
+    <vscale x 4 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i1.i16(
+    <vscale x 8 x i16> %0,
+    i16 9,
+    <vscale x 8 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 16 x i1> @intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i1.i16(
+    <vscale x 16 x i16> %0,
+    i16 9,
+    <vscale x 16 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 32 x i1> @intrinsic_vmadc.carry.in_vim_nxv32i1_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv32i1_nxv32i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i1.i16(
+    <vscale x 32 x i16> %0,
+    i16 9,
+    <vscale x 32 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 32 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i1.i32(
+    <vscale x 1 x i32> %0,
+    i32 9,
+    <vscale x 1 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i1.i32(
+    <vscale x 2 x i32> %0,
+    i32 9,
+    <vscale x 2 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i1.i32(
+    <vscale x 4 x i32> %0,
+    i32 9,
+    <vscale x 4 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i1.i32(
+    <vscale x 8 x i32> %0,
+    i32 9,
+    <vscale x 8 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 16 x i1> @intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i1.i32(
+    <vscale x 16 x i32> %0,
+    i32 9,
+    <vscale x 16 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 16 x i1> %a
+}

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv64.ll
new file mode 100644
index 000000000000..b94a2b9f4d98
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv64.ll
@@ -0,0 +1,1189 @@
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i1.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i8_nxv1i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i1.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i1.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i8_nxv2i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i1.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i1.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i8_nxv4i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i1.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i1.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i8_nxv8i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i1.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i1.nxv16i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i1> @intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i8_nxv16i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i1.nxv16i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    <vscale x 16 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i1.nxv32i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  <vscale x 32 x i1>,
+  i64);
+
+define <vscale x 32 x i1> @intrinsic_vmadc.carry.in_vvm_nxv32i1_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv32i1_nxv32i8_nxv32i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i1.nxv32i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8> %1,
+    <vscale x 32 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 64 x i1> @llvm.riscv.vmadc.carry.in.nxv64i1.nxv64i8(
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  <vscale x 64 x i1>,
+  i64);
+
+define <vscale x 64 x i1> @intrinsic_vmadc.carry.in_vvm_nxv64i1_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv64i1_nxv64i8_nxv64i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 64 x i1> @llvm.riscv.vmadc.carry.in.nxv64i1.nxv64i8(
+    <vscale x 64 x i8> %0,
+    <vscale x 64 x i8> %1,
+    <vscale x 64 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 64 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i1.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i16_nxv1i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i1.nxv1i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i1.nxv2i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i16_nxv2i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i1.nxv2i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %1,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i1.nxv4i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i16_nxv4i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i1.nxv4i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %1,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i1.nxv8i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i16_nxv8i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i1.nxv8i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %1,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i1.nxv16i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i1> @intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i16_nxv16i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i1.nxv16i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %1,
+    <vscale x 16 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i1.nxv32i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  <vscale x 32 x i1>,
+  i64);
+
+define <vscale x 32 x i1> @intrinsic_vmadc.carry.in_vvm_nxv32i1_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv32i1_nxv32i16_nxv32i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i1.nxv32i16(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16> %1,
+    <vscale x 32 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i1.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i32_nxv1i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i1.nxv1i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i1.nxv2i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i32_nxv2i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i1.nxv2i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i1.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i32_nxv4i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i1.nxv4i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i1.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i32_nxv8i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i1.nxv8i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i1.nxv16i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i1> @intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i32_nxv16i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i1.nxv16i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    <vscale x 16 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i1.nxv1i64(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i64_nxv1i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i1.nxv1i64(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64> %1,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i1.nxv2i64(
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i64_nxv2i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i1.nxv2i64(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64> %1,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i1.nxv4i64(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i64_nxv4i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i1.nxv4i64(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64> %1,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i1.nxv8i64(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i64_nxv8i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i1.nxv8i64(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i64> %1,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i1.i8(
+  <vscale x 1 x i8>,
+  i8,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i1.i8(
+    <vscale x 1 x i8> %0,
+    i8 %1,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i1.i8(
+  <vscale x 2 x i8>,
+  i8,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i1.i8(
+    <vscale x 2 x i8> %0,
+    i8 %1,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i1.i8(
+  <vscale x 4 x i8>,
+  i8,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i1.i8(
+    <vscale x 4 x i8> %0,
+    i8 %1,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i1.i8(
+  <vscale x 8 x i8>,
+  i8,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i1.i8(
+    <vscale x 8 x i8> %0,
+    i8 %1,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i1.i8(
+  <vscale x 16 x i8>,
+  i8,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i1> @intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i1.i8(
+    <vscale x 16 x i8> %0,
+    i8 %1,
+    <vscale x 16 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i1.i8(
+  <vscale x 32 x i8>,
+  i8,
+  <vscale x 32 x i1>,
+  i64);
+
+define <vscale x 32 x i1> @intrinsic_vmadc.carry.in_vxm_nxv32i1_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv32i1_nxv32i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i1.i8(
+    <vscale x 32 x i8> %0,
+    i8 %1,
+    <vscale x 32 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 64 x i1> @llvm.riscv.vmadc.carry.in.nxv64i1.i8(
+  <vscale x 64 x i8>,
+  i8,
+  <vscale x 64 x i1>,
+  i64);
+
+define <vscale x 64 x i1> @intrinsic_vmadc.carry.in_vxm_nxv64i1_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, <vscale x 64 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv64i1_nxv64i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 64 x i1> @llvm.riscv.vmadc.carry.in.nxv64i1.i8(
+    <vscale x 64 x i8> %0,
+    i8 %1,
+    <vscale x 64 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 64 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i1.i16(
+  <vscale x 1 x i16>,
+  i16,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i1.i16(
+    <vscale x 1 x i16> %0,
+    i16 %1,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i1.i16(
+  <vscale x 2 x i16>,
+  i16,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i1.i16(
+    <vscale x 2 x i16> %0,
+    i16 %1,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i1.i16(
+  <vscale x 4 x i16>,
+  i16,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i1.i16(
+    <vscale x 4 x i16> %0,
+    i16 %1,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i1.i16(
+  <vscale x 8 x i16>,
+  i16,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i1.i16(
+    <vscale x 8 x i16> %0,
+    i16 %1,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i1.i16(
+  <vscale x 16 x i16>,
+  i16,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i1> @intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i1.i16(
+    <vscale x 16 x i16> %0,
+    i16 %1,
+    <vscale x 16 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i1.i16(
+  <vscale x 32 x i16>,
+  i16,
+  <vscale x 32 x i1>,
+  i64);
+
+define <vscale x 32 x i1> @intrinsic_vmadc.carry.in_vxm_nxv32i1_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv32i1_nxv32i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i1.i16(
+    <vscale x 32 x i16> %0,
+    i16 %1,
+    <vscale x 32 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i1.i32(
+  <vscale x 1 x i32>,
+  i32,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i1.i32(
+    <vscale x 1 x i32> %0,
+    i32 %1,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i1.i32(
+  <vscale x 2 x i32>,
+  i32,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i1.i32(
+    <vscale x 2 x i32> %0,
+    i32 %1,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i1.i32(
+  <vscale x 4 x i32>,
+  i32,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i1.i32(
+    <vscale x 4 x i32> %0,
+    i32 %1,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i1.i32(
+  <vscale x 8 x i32>,
+  i32,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i1.i32(
+    <vscale x 8 x i32> %0,
+    i32 %1,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i1.i32(
+  <vscale x 16 x i32>,
+  i32,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i1> @intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i1.i32(
+    <vscale x 16 x i32> %0,
+    i32 %1,
+    <vscale x 16 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i1.i64(
+  <vscale x 1 x i64>,
+  i64,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i1.i64(
+    <vscale x 1 x i64> %0,
+    i64 %1,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i1.i64(
+  <vscale x 2 x i64>,
+  i64,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i1.i64(
+    <vscale x 2 x i64> %0,
+    i64 %1,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i1.i64(
+  <vscale x 4 x i64>,
+  i64,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i1.i64(
+    <vscale x 4 x i64> %0,
+    i64 %1,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i1.i64(
+  <vscale x 8 x i64>,
+  i64,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i1.i64(
+    <vscale x 8 x i64> %0,
+    i64 %1,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i1.i8(
+    <vscale x 1 x i8> %0,
+    i8 9,
+    <vscale x 1 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i1.i8(
+    <vscale x 2 x i8> %0,
+    i8 9,
+    <vscale x 2 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i1.i8(
+    <vscale x 4 x i8> %0,
+    i8 9,
+    <vscale x 4 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i1.i8(
+    <vscale x 8 x i8> %0,
+    i8 9,
+    <vscale x 8 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 16 x i1> @intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i1.i8(
+    <vscale x 16 x i8> %0,
+    i8 9,
+    <vscale x 16 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 32 x i1> @intrinsic_vmadc.carry.in_vim_nxv32i1_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv32i1_nxv32i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i1.i8(
+    <vscale x 32 x i8> %0,
+    i8 9,
+    <vscale x 32 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 32 x i1> %a
+}
+
+define <vscale x 64 x i1> @intrinsic_vmadc.carry.in_vim_nxv64i1_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv64i1_nxv64i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 64 x i1> @llvm.riscv.vmadc.carry.in.nxv64i1.i8(
+    <vscale x 64 x i8> %0,
+    i8 9,
+    <vscale x 64 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 64 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i1.i16(
+    <vscale x 1 x i16> %0,
+    i16 9,
+    <vscale x 1 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i1.i16(
+    <vscale x 2 x i16> %0,
+    i16 9,
+    <vscale x 2 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i1.i16(
+    <vscale x 4 x i16> %0,
+    i16 9,
+    <vscale x 4 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i1.i16(
+    <vscale x 8 x i16> %0,
+    i16 9,
+    <vscale x 8 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 16 x i1> @intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i1.i16(
+    <vscale x 16 x i16> %0,
+    i16 9,
+    <vscale x 16 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 32 x i1> @intrinsic_vmadc.carry.in_vim_nxv32i1_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv32i1_nxv32i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i1.i16(
+    <vscale x 32 x i16> %0,
+    i16 9,
+    <vscale x 32 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 32 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i1.i32(
+    <vscale x 1 x i32> %0,
+    i32 9,
+    <vscale x 1 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i1.i32(
+    <vscale x 2 x i32> %0,
+    i32 9,
+    <vscale x 2 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i1.i32(
+    <vscale x 4 x i32> %0,
+    i32 9,
+    <vscale x 4 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i1.i32(
+    <vscale x 8 x i32> %0,
+    i32 9,
+    <vscale x 8 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 16 x i1> @intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i1.i32(
+    <vscale x 16 x i32> %0,
+    i32 9,
+    <vscale x 16 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i1.i64(
+    <vscale x 1 x i64> %0,
+    i64 9,
+    <vscale x 1 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i1.i64(
+    <vscale x 2 x i64> %0,
+    i64 9,
+    <vscale x 2 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i1.i64(
+    <vscale x 4 x i64> %0,
+    i64 9,
+    <vscale x 4 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i1.i64(
+    <vscale x 8 x i64> %0,
+    i64 9,
+    <vscale x 8 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 8 x i1> %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsbc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsbc-rv32.ll
new file mode 100644
index 000000000000..4e8eabcf7ea3
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsbc-rv32.ll
@@ -0,0 +1,649 @@
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i1.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  i32);
+
+define <vscale x 1 x i1> @intrinsic_vmsbc_vv_nxv1i1_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc_vv_nxv1i1_nxv1i8_nxv1i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i1.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i1.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  i32);
+
+define <vscale x 2 x i1> @intrinsic_vmsbc_vv_nxv2i1_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc_vv_nxv2i1_nxv2i8_nxv2i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i1.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i1.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  i32);
+
+define <vscale x 4 x i1> @intrinsic_vmsbc_vv_nxv4i1_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc_vv_nxv4i1_nxv4i8_nxv4i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i1.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i1.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  i32);
+
+define <vscale x 8 x i1> @intrinsic_vmsbc_vv_nxv8i1_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc_vv_nxv8i1_nxv8i8_nxv8i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i1.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i1.nxv16i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  i32);
+
+define <vscale x 16 x i1> @intrinsic_vmsbc_vv_nxv16i1_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc_vv_nxv16i1_nxv16i8_nxv16i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i1.nxv16i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i1.nxv32i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  i32);
+
+define <vscale x 32 x i1> @intrinsic_vmsbc_vv_nxv32i1_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc_vv_nxv32i1_nxv32i8_nxv32i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i1.nxv32i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 64 x i1> @llvm.riscv.vmsbc.nxv64i1.nxv64i8(
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  i32);
+
+define <vscale x 64 x i1> @intrinsic_vmsbc_vv_nxv64i1_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc_vv_nxv64i1_nxv64i8_nxv64i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 64 x i1> @llvm.riscv.vmsbc.nxv64i1.nxv64i8(
+    <vscale x 64 x i8> %0,
+    <vscale x 64 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 64 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i1.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  i32);
+
+define <vscale x 1 x i1> @intrinsic_vmsbc_vv_nxv1i1_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc_vv_nxv1i1_nxv1i16_nxv1i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i1.nxv1i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    i32 %2)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i1.nxv2i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  i32);
+
+define <vscale x 2 x i1> @intrinsic_vmsbc_vv_nxv2i1_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc_vv_nxv2i1_nxv2i16_nxv2i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i1.nxv2i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %1,
+    i32 %2)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i1.nxv4i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  i32);
+
+define <vscale x 4 x i1> @intrinsic_vmsbc_vv_nxv4i1_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc_vv_nxv4i1_nxv4i16_nxv4i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i1.nxv4i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %1,
+    i32 %2)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i1.nxv8i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  i32);
+
+define <vscale x 8 x i1> @intrinsic_vmsbc_vv_nxv8i1_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc_vv_nxv8i1_nxv8i16_nxv8i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i1.nxv8i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %1,
+    i32 %2)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i1.nxv16i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  i32);
+
+define <vscale x 16 x i1> @intrinsic_vmsbc_vv_nxv16i1_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc_vv_nxv16i1_nxv16i16_nxv16i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i1.nxv16i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %1,
+    i32 %2)
+
+  ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i1.nxv32i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  i32);
+
+define <vscale x 32 x i1> @intrinsic_vmsbc_vv_nxv32i1_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc_vv_nxv32i1_nxv32i16_nxv32i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i1.nxv32i16(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16> %1,
+    i32 %2)
+
+  ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i1.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  i32);
+
+define <vscale x 1 x i1> @intrinsic_vmsbc_vv_nxv1i1_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc_vv_nxv1i1_nxv1i32_nxv1i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i1.nxv1i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    i32 %2)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i1.nxv2i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  i32);
+
+define <vscale x 2 x i1> @intrinsic_vmsbc_vv_nxv2i1_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc_vv_nxv2i1_nxv2i32_nxv2i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i1.nxv2i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    i32 %2)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i1.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  i32);
+
+define <vscale x 4 x i1> @intrinsic_vmsbc_vv_nxv4i1_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc_vv_nxv4i1_nxv4i32_nxv4i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i1.nxv4i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    i32 %2)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i1.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  i32);
+
+define <vscale x 8 x i1> @intrinsic_vmsbc_vv_nxv8i1_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc_vv_nxv8i1_nxv8i32_nxv8i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i1.nxv8i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    i32 %2)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i1.nxv16i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  i32);
+
+define <vscale x 16 x i1> @intrinsic_vmsbc_vv_nxv16i1_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc_vv_nxv16i1_nxv16i32_nxv16i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i1.nxv16i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    i32 %2)
+
+  ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i1.i8(
+  <vscale x 1 x i8>,
+  i8,
+  i32);
+
+define <vscale x 1 x i1> @intrinsic_vmsbc_vx_nxv1i1_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc_vx_nxv1i1_nxv1i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i1.i8(
+    <vscale x 1 x i8> %0,
+    i8 %1,
+    i32 %2)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i1.i8(
+  <vscale x 2 x i8>,
+  i8,
+  i32);
+
+define <vscale x 2 x i1> @intrinsic_vmsbc_vx_nxv2i1_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc_vx_nxv2i1_nxv2i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i1.i8(
+    <vscale x 2 x i8> %0,
+    i8 %1,
+    i32 %2)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i1.i8(
+  <vscale x 4 x i8>,
+  i8,
+  i32);
+
+define <vscale x 4 x i1> @intrinsic_vmsbc_vx_nxv4i1_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc_vx_nxv4i1_nxv4i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i1.i8(
+    <vscale x 4 x i8> %0,
+    i8 %1,
+    i32 %2)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i1.i8(
+  <vscale x 8 x i8>,
+  i8,
+  i32);
+
+define <vscale x 8 x i1> @intrinsic_vmsbc_vx_nxv8i1_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc_vx_nxv8i1_nxv8i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i1.i8(
+    <vscale x 8 x i8> %0,
+    i8 %1,
+    i32 %2)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i1.i8(
+  <vscale x 16 x i8>,
+  i8,
+  i32);
+
+define <vscale x 16 x i1> @intrinsic_vmsbc_vx_nxv16i1_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc_vx_nxv16i1_nxv16i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i1.i8(
+    <vscale x 16 x i8> %0,
+    i8 %1,
+    i32 %2)
+
+  ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i1.i8(
+  <vscale x 32 x i8>,
+  i8,
+  i32);
+
+define <vscale x 32 x i1> @intrinsic_vmsbc_vx_nxv32i1_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc_vx_nxv32i1_nxv32i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i1.i8(
+    <vscale x 32 x i8> %0,
+    i8 %1,
+    i32 %2)
+
+  ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 64 x i1> @llvm.riscv.vmsbc.nxv64i1.i8(
+  <vscale x 64 x i8>,
+  i8,
+  i32);
+
+define <vscale x 64 x i1> @intrinsic_vmsbc_vx_nxv64i1_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc_vx_nxv64i1_nxv64i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 64 x i1> @llvm.riscv.vmsbc.nxv64i1.i8(
+    <vscale x 64 x i8> %0,
+    i8 %1,
+    i32 %2)
+
+  ret <vscale x 64 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i1.i16(
+  <vscale x 1 x i16>,
+  i16,
+  i32);
+
+define <vscale x 1 x i1> @intrinsic_vmsbc_vx_nxv1i1_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc_vx_nxv1i1_nxv1i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i1.i16(
+    <vscale x 1 x i16> %0,
+    i16 %1,
+    i32 %2)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i1.i16(
+  <vscale x 2 x i16>,
+  i16,
+  i32);
+
+define <vscale x 2 x i1> @intrinsic_vmsbc_vx_nxv2i1_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc_vx_nxv2i1_nxv2i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i1.i16(
+    <vscale x 2 x i16> %0,
+    i16 %1,
+    i32 %2)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i1.i16(
+  <vscale x 4 x i16>,
+  i16,
+  i32);
+
+define <vscale x 4 x i1> @intrinsic_vmsbc_vx_nxv4i1_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc_vx_nxv4i1_nxv4i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i1.i16(
+    <vscale x 4 x i16> %0,
+    i16 %1,
+    i32 %2)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i1.i16(
+  <vscale x 8 x i16>,
+  i16,
+  i32);
+
+define <vscale x 8 x i1> @intrinsic_vmsbc_vx_nxv8i1_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc_vx_nxv8i1_nxv8i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i1.i16(
+    <vscale x 8 x i16> %0,
+    i16 %1,
+    i32 %2)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i1.i16(
+  <vscale x 16 x i16>,
+  i16,
+  i32);
+
+define <vscale x 16 x i1> @intrinsic_vmsbc_vx_nxv16i1_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc_vx_nxv16i1_nxv16i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i1.i16(
+    <vscale x 16 x i16> %0,
+    i16 %1,
+    i32 %2)
+
+  ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i1.i16(
+  <vscale x 32 x i16>,
+  i16,
+  i32);
+
+define <vscale x 32 x i1> @intrinsic_vmsbc_vx_nxv32i1_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc_vx_nxv32i1_nxv32i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i1.i16(
+    <vscale x 32 x i16> %0,
+    i16 %1,
+    i32 %2)
+
+  ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i1.i32(
+  <vscale x 1 x i32>,
+  i32,
+  i32);
+
+define <vscale x 1 x i1> @intrinsic_vmsbc_vx_nxv1i1_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc_vx_nxv1i1_nxv1i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i1.i32(
+    <vscale x 1 x i32> %0,
+    i32 %1,
+    i32 %2)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i1.i32(
+  <vscale x 2 x i32>,
+  i32,
+  i32);
+
+define <vscale x 2 x i1> @intrinsic_vmsbc_vx_nxv2i1_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc_vx_nxv2i1_nxv2i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i1.i32(
+    <vscale x 2 x i32> %0,
+    i32 %1,
+    i32 %2)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i1.i32(
+  <vscale x 4 x i32>,
+  i32,
+  i32);
+
+define <vscale x 4 x i1> @intrinsic_vmsbc_vx_nxv4i1_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc_vx_nxv4i1_nxv4i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i1.i32(
+    <vscale x 4 x i32> %0,
+    i32 %1,
+    i32 %2)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i1.i32(
+  <vscale x 8 x i32>,
+  i32,
+  i32);
+
+define <vscale x 8 x i1> @intrinsic_vmsbc_vx_nxv8i1_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc_vx_nxv8i1_nxv8i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i1.i32(
+    <vscale x 8 x i32> %0,
+    i32 %1,
+    i32 %2)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i1.i32(
+  <vscale x 16 x i32>,
+  i32,
+  i32);
+
+define <vscale x 16 x i1> @intrinsic_vmsbc_vx_nxv16i1_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc_vx_nxv16i1_nxv16i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i1.i32(
+    <vscale x 16 x i32> %0,
+    i32 %1,
+    i32 %2)
+
+  ret <vscale x 16 x i1> %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsbc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsbc-rv64.ll
new file mode 100644
index 000000000000..b28aaaef3b02
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsbc-rv64.ll
@@ -0,0 +1,793 @@
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i1.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsbc_vv_nxv1i1_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc_vv_nxv1i1_nxv1i8_nxv1i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i1.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i1.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsbc_vv_nxv2i1_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc_vv_nxv2i1_nxv2i8_nxv2i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i1.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i1.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsbc_vv_nxv4i1_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc_vv_nxv4i1_nxv4i8_nxv4i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i1.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i1.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  i64);
+
+define <vscale x 8 x i1> @intrinsic_vmsbc_vv_nxv8i1_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc_vv_nxv8i1_nxv8i8_nxv8i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i1.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i1.nxv16i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  i64);
+
+define <vscale x 16 x i1> @intrinsic_vmsbc_vv_nxv16i1_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc_vv_nxv16i1_nxv16i8_nxv16i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i1.nxv16i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i1.nxv32i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  i64);
+
+define <vscale x 32 x i1> @intrinsic_vmsbc_vv_nxv32i1_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc_vv_nxv32i1_nxv32i8_nxv32i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i1.nxv32i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 64 x i1> @llvm.riscv.vmsbc.nxv64i1.nxv64i8(
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  i64);
+
+define <vscale x 64 x i1> @intrinsic_vmsbc_vv_nxv64i1_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc_vv_nxv64i1_nxv64i8_nxv64i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 64 x i1> @llvm.riscv.vmsbc.nxv64i1.nxv64i8(
+    <vscale x 64 x i8> %0,
+    <vscale x 64 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 64 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i1.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsbc_vv_nxv1i1_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc_vv_nxv1i1_nxv1i16_nxv1i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i1.nxv1i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    i64 %2)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i1.nxv2i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsbc_vv_nxv2i1_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc_vv_nxv2i1_nxv2i16_nxv2i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i1.nxv2i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %1,
+    i64 %2)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i1.nxv4i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsbc_vv_nxv4i1_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc_vv_nxv4i1_nxv4i16_nxv4i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i1.nxv4i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %1,
+    i64 %2)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i1.nxv8i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  i64);
+
+define <vscale x 8 x i1> @intrinsic_vmsbc_vv_nxv8i1_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc_vv_nxv8i1_nxv8i16_nxv8i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i1.nxv8i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %1,
+    i64 %2)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i1.nxv16i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  i64);
+
+define <vscale x 16 x i1> @intrinsic_vmsbc_vv_nxv16i1_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc_vv_nxv16i1_nxv16i16_nxv16i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i1.nxv16i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %1,
+    i64 %2)
+
+  ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i1.nxv32i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  i64);
+
+define <vscale x 32 x i1> @intrinsic_vmsbc_vv_nxv32i1_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc_vv_nxv32i1_nxv32i16_nxv32i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i1.nxv32i16(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16> %1,
+    i64 %2)
+
+  ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i1.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsbc_vv_nxv1i1_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc_vv_nxv1i1_nxv1i32_nxv1i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i1.nxv1i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    i64 %2)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i1.nxv2i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsbc_vv_nxv2i1_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc_vv_nxv2i1_nxv2i32_nxv2i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i1.nxv2i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    i64 %2)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i1.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsbc_vv_nxv4i1_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc_vv_nxv4i1_nxv4i32_nxv4i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i1.nxv4i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    i64 %2)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i1.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  i64);
+
+define <vscale x 8 x i1> @intrinsic_vmsbc_vv_nxv8i1_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc_vv_nxv8i1_nxv8i32_nxv8i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i1.nxv8i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    i64 %2)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i1.nxv16i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  i64);
+
+define <vscale x 16 x i1> @intrinsic_vmsbc_vv_nxv16i1_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc_vv_nxv16i1_nxv16i32_nxv16i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i1.nxv16i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    i64 %2)
+
+  ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i1.nxv1i64(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>,
+  i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsbc_vv_nxv1i1_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc_vv_nxv1i1_nxv1i64_nxv1i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i1.nxv1i64(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64> %1,
+    i64 %2)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i1.nxv2i64(
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>,
+  i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsbc_vv_nxv2i1_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc_vv_nxv2i1_nxv2i64_nxv2i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i1.nxv2i64(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64> %1,
+    i64 %2)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i1.nxv4i64(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsbc_vv_nxv4i1_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc_vv_nxv4i1_nxv4i64_nxv4i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i1.nxv4i64(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64> %1,
+    i64 %2)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i1.nxv8i64(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>,
+  i64);
+
+define <vscale x 8 x i1> @intrinsic_vmsbc_vv_nxv8i1_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc_vv_nxv8i1_nxv8i64_nxv8i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i1.nxv8i64(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i64> %1,
+    i64 %2)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i1.i8(
+  <vscale x 1 x i8>,
+  i8,
+  i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsbc_vx_nxv1i1_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc_vx_nxv1i1_nxv1i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i1.i8(
+    <vscale x 1 x i8> %0,
+    i8 %1,
+    i64 %2)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i1.i8(
+  <vscale x 2 x i8>,
+  i8,
+  i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsbc_vx_nxv2i1_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc_vx_nxv2i1_nxv2i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i1.i8(
+    <vscale x 2 x i8> %0,
+    i8 %1,
+    i64 %2)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i1.i8(
+  <vscale x 4 x i8>,
+  i8,
+  i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsbc_vx_nxv4i1_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc_vx_nxv4i1_nxv4i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i1.i8(
+    <vscale x 4 x i8> %0,
+    i8 %1,
+    i64 %2)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i1.i8(
+  <vscale x 8 x i8>,
+  i8,
+  i64);
+
+define <vscale x 8 x i1> @intrinsic_vmsbc_vx_nxv8i1_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc_vx_nxv8i1_nxv8i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i1.i8(
+    <vscale x 8 x i8> %0,
+    i8 %1,
+    i64 %2)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i1.i8(
+  <vscale x 16 x i8>,
+  i8,
+  i64);
+
+define <vscale x 16 x i1> @intrinsic_vmsbc_vx_nxv16i1_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc_vx_nxv16i1_nxv16i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i1.i8(
+    <vscale x 16 x i8> %0,
+    i8 %1,
+    i64 %2)
+
+  ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i1.i8(
+  <vscale x 32 x i8>,
+  i8,
+  i64);
+
+define <vscale x 32 x i1> @intrinsic_vmsbc_vx_nxv32i1_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc_vx_nxv32i1_nxv32i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i1.i8(
+    <vscale x 32 x i8> %0,
+    i8 %1,
+    i64 %2)
+
+  ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 64 x i1> @llvm.riscv.vmsbc.nxv64i1.i8(
+  <vscale x 64 x i8>,
+  i8,
+  i64);
+
+define <vscale x 64 x i1> @intrinsic_vmsbc_vx_nxv64i1_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc_vx_nxv64i1_nxv64i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 64 x i1> @llvm.riscv.vmsbc.nxv64i1.i8(
+    <vscale x 64 x i8> %0,
+    i8 %1,
+    i64 %2)
+
+  ret <vscale x 64 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i1.i16(
+  <vscale x 1 x i16>,
+  i16,
+  i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsbc_vx_nxv1i1_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc_vx_nxv1i1_nxv1i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i1.i16(
+    <vscale x 1 x i16> %0,
+    i16 %1,
+    i64 %2)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i1.i16(
+  <vscale x 2 x i16>,
+  i16,
+  i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsbc_vx_nxv2i1_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc_vx_nxv2i1_nxv2i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i1.i16(
+    <vscale x 2 x i16> %0,
+    i16 %1,
+    i64 %2)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i1.i16(
+  <vscale x 4 x i16>,
+  i16,
+  i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsbc_vx_nxv4i1_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc_vx_nxv4i1_nxv4i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i1.i16(
+    <vscale x 4 x i16> %0,
+    i16 %1,
+    i64 %2)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i1.i16(
+  <vscale x 8 x i16>,
+  i16,
+  i64);
+
+define <vscale x 8 x i1> @intrinsic_vmsbc_vx_nxv8i1_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc_vx_nxv8i1_nxv8i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i1.i16(
+    <vscale x 8 x i16> %0,
+    i16 %1,
+    i64 %2)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i1.i16(
+  <vscale x 16 x i16>,
+  i16,
+  i64);
+
+define <vscale x 16 x i1> @intrinsic_vmsbc_vx_nxv16i1_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc_vx_nxv16i1_nxv16i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i1.i16(
+    <vscale x 16 x i16> %0,
+    i16 %1,
+    i64 %2)
+
+  ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i1.i16(
+  <vscale x 32 x i16>,
+  i16,
+  i64);
+
+define <vscale x 32 x i1> @intrinsic_vmsbc_vx_nxv32i1_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc_vx_nxv32i1_nxv32i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i1.i16(
+    <vscale x 32 x i16> %0,
+    i16 %1,
+    i64 %2)
+
+  ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i1.i32(
+  <vscale x 1 x i32>,
+  i32,
+  i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsbc_vx_nxv1i1_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc_vx_nxv1i1_nxv1i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i1.i32(
+    <vscale x 1 x i32> %0,
+    i32 %1,
+    i64 %2)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i1.i32(
+  <vscale x 2 x i32>,
+  i32,
+  i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsbc_vx_nxv2i1_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc_vx_nxv2i1_nxv2i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i1.i32(
+    <vscale x 2 x i32> %0,
+    i32 %1,
+    i64 %2)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i1.i32(
+  <vscale x 4 x i32>,
+  i32,
+  i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsbc_vx_nxv4i1_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc_vx_nxv4i1_nxv4i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i1.i32(
+    <vscale x 4 x i32> %0,
+    i32 %1,
+    i64 %2)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i1.i32(
+  <vscale x 8 x i32>,
+  i32,
+  i64);
+
+define <vscale x 8 x i1> @intrinsic_vmsbc_vx_nxv8i1_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc_vx_nxv8i1_nxv8i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i1.i32(
+    <vscale x 8 x i32> %0,
+    i32 %1,
+    i64 %2)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i1.i32(
+  <vscale x 16 x i32>,
+  i32,
+  i64);
+
+define <vscale x 16 x i1> @intrinsic_vmsbc_vx_nxv16i1_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc_vx_nxv16i1_nxv16i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i1.i32(
+    <vscale x 16 x i32> %0,
+    i32 %1,
+    i64 %2)
+
+  ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i1.i64(
+  <vscale x 1 x i64>,
+  i64,
+  i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsbc_vx_nxv1i1_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc_vx_nxv1i1_nxv1i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i1.i64(
+    <vscale x 1 x i64> %0,
+    i64 %1,
+    i64 %2)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i1.i64(
+  <vscale x 2 x i64>,
+  i64,
+  i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsbc_vx_nxv2i1_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc_vx_nxv2i1_nxv2i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i1.i64(
+    <vscale x 2 x i64> %0,
+    i64 %1,
+    i64 %2)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i1.i64(
+  <vscale x 4 x i64>,
+  i64,
+  i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsbc_vx_nxv4i1_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc_vx_nxv4i1_nxv4i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i1.i64(
+    <vscale x 4 x i64> %0,
+    i64 %1,
+    i64 %2)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i1.i64(
+  <vscale x 8 x i64>,
+  i64,
+  i64);
+
+define <vscale x 8 x i1> @intrinsic_vmsbc_vx_nxv8i1_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc_vx_nxv8i1_nxv8i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i1.i64(
+    <vscale x 8 x i64> %0,
+    i64 %1,
+    i64 %2)
+
+  ret <vscale x 8 x i1> %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in-rv32.ll
new file mode 100644
index 000000000000..6a8253ffc504
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in-rv32.ll
@@ -0,0 +1,721 @@
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i1.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i8_nxv1i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i1.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i1.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i8_nxv2i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i1.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i1.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i8_nxv4i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i1.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i1.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i8_nxv8i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i1.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i1.nxv16i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i8_nxv16i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i1.nxv16i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    <vscale x 16 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i1.nxv32i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  <vscale x 32 x i1>,
+  i32);
+
+define <vscale x 32 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv32i1_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv32i1_nxv32i8_nxv32i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i1.nxv32i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8> %1,
+    <vscale x 32 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 64 x i1> @llvm.riscv.vmsbc.borrow.in.nxv64i1.nxv64i8(
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  <vscale x 64 x i1>,
+  i32);
+
+define <vscale x 64 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv64i1_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv64i1_nxv64i8_nxv64i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 64 x i1> @llvm.riscv.vmsbc.borrow.in.nxv64i1.nxv64i8(
+    <vscale x 64 x i8> %0,
+    <vscale x 64 x i8> %1,
+    <vscale x 64 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 64 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i1.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i16_nxv1i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i1.nxv1i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i1.nxv2i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i16_nxv2i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i1.nxv2i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %1,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i1.nxv4i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i16_nxv4i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i1.nxv4i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %1,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i1.nxv8i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i16_nxv8i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i1.nxv8i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %1,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i1.nxv16i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i16_nxv16i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i1.nxv16i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %1,
+    <vscale x 16 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i1.nxv32i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  <vscale x 32 x i1>,
+  i32);
+
+define <vscale x 32 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv32i1_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv32i1_nxv32i16_nxv32i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i1.nxv32i16(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16> %1,
+    <vscale x 32 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i1.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i32_nxv1i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i1.nxv1i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i1.nxv2i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i32_nxv2i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i1.nxv2i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i1.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i32_nxv4i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i1.nxv4i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i1.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i32_nxv8i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i1.nxv8i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i1.nxv16i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i32_nxv16i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i1.nxv16i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    <vscale x 16 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i1.i8(
+  <vscale x 1 x i8>,
+  i8,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i1.i8(
+    <vscale x 1 x i8> %0,
+    i8 %1,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i1.i8(
+  <vscale x 2 x i8>,
+  i8,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i1.i8(
+    <vscale x 2 x i8> %0,
+    i8 %1,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i1.i8(
+  <vscale x 4 x i8>,
+  i8,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i1.i8(
+    <vscale x 4 x i8> %0,
+    i8 %1,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i1.i8(
+  <vscale x 8 x i8>,
+  i8,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i1.i8(
+    <vscale x 8 x i8> %0,
+    i8 %1,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i1.i8(
+  <vscale x 16 x i8>,
+  i8,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i1.i8(
+    <vscale x 16 x i8> %0,
+    i8 %1,
+    <vscale x 16 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i1.i8(
+  <vscale x 32 x i8>,
+  i8,
+  <vscale x 32 x i1>,
+  i32);
+
+define <vscale x 32 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv32i1_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv32i1_nxv32i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i1.i8(
+    <vscale x 32 x i8> %0,
+    i8 %1,
+    <vscale x 32 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 64 x i1> @llvm.riscv.vmsbc.borrow.in.nxv64i1.i8(
+  <vscale x 64 x i8>,
+  i8,
+  <vscale x 64 x i1>,
+  i32);
+
+define <vscale x 64 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv64i1_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, <vscale x 64 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv64i1_nxv64i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 64 x i1> @llvm.riscv.vmsbc.borrow.in.nxv64i1.i8(
+    <vscale x 64 x i8> %0,
+    i8 %1,
+    <vscale x 64 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 64 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i1.i16(
+  <vscale x 1 x i16>,
+  i16,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i1.i16(
+    <vscale x 1 x i16> %0,
+    i16 %1,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i1.i16(
+  <vscale x 2 x i16>,
+  i16,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i1.i16(
+    <vscale x 2 x i16> %0,
+    i16 %1,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i1.i16(
+  <vscale x 4 x i16>,
+  i16,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i1.i16(
+    <vscale x 4 x i16> %0,
+    i16 %1,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i1.i16(
+  <vscale x 8 x i16>,
+  i16,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i1.i16(
+    <vscale x 8 x i16> %0,
+    i16 %1,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i1.i16(
+  <vscale x 16 x i16>,
+  i16,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i1.i16(
+    <vscale x 16 x i16> %0,
+    i16 %1,
+    <vscale x 16 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i1.i16(
+  <vscale x 32 x i16>,
+  i16,
+  <vscale x 32 x i1>,
+  i32);
+
+define <vscale x 32 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv32i1_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv32i1_nxv32i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i1.i16(
+    <vscale x 32 x i16> %0,
+    i16 %1,
+    <vscale x 32 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i1.i32(
+  <vscale x 1 x i32>,
+  i32,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i1.i32(
+    <vscale x 1 x i32> %0,
+    i32 %1,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i1.i32(
+  <vscale x 2 x i32>,
+  i32,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i1.i32(
+    <vscale x 2 x i32> %0,
+    i32 %1,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i1.i32(
+  <vscale x 4 x i32>,
+  i32,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i1.i32(
+    <vscale x 4 x i32> %0,
+    i32 %1,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i1.i32(
+  <vscale x 8 x i32>,
+  i32,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i1.i32(
+    <vscale x 8 x i32> %0,
+    i32 %1,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i1.i32(
+  <vscale x 16 x i32>,
+  i32,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i1.i32(
+    <vscale x 16 x i32> %0,
+    i32 %1,
+    <vscale x 16 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 16 x i1> %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in-rv64.ll
new file mode 100644
index 000000000000..5b22e5977ffd
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in-rv64.ll
@@ -0,0 +1,881 @@
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i1.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i8_nxv1i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i1.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i1.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i8_nxv2i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i1.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i1.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i8_nxv4i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i1.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i1.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i8_nxv8i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i1.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i1.nxv16i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i8_nxv16i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i1.nxv16i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    <vscale x 16 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i1.nxv32i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  <vscale x 32 x i1>,
+  i64);
+
+define <vscale x 32 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv32i1_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv32i1_nxv32i8_nxv32i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i1.nxv32i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8> %1,
+    <vscale x 32 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 64 x i1> @llvm.riscv.vmsbc.borrow.in.nxv64i1.nxv64i8(
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  <vscale x 64 x i1>,
+  i64);
+
+define <vscale x 64 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv64i1_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv64i1_nxv64i8_nxv64i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 64 x i1> @llvm.riscv.vmsbc.borrow.in.nxv64i1.nxv64i8(
+    <vscale x 64 x i8> %0,
+    <vscale x 64 x i8> %1,
+    <vscale x 64 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 64 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i1.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i16_nxv1i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i1.nxv1i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i1.nxv2i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i16_nxv2i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i1.nxv2i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %1,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i1.nxv4i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i16_nxv4i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i1.nxv4i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %1,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i1.nxv8i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i16_nxv8i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i1.nxv8i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %1,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i1.nxv16i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i16_nxv16i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i1.nxv16i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %1,
+    <vscale x 16 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i1.nxv32i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  <vscale x 32 x i1>,
+  i64);
+
+define <vscale x 32 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv32i1_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv32i1_nxv32i16_nxv32i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i1.nxv32i16(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16> %1,
+    <vscale x 32 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i1.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i32_nxv1i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i1.nxv1i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i1.nxv2i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i32_nxv2i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i1.nxv2i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i1.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i32_nxv4i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i1.nxv4i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i1.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i32_nxv8i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i1.nxv8i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i1.nxv16i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i32_nxv16i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i1.nxv16i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    <vscale x 16 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i1.nxv1i64(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i64_nxv1i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i1.nxv1i64(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64> %1,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i1.nxv2i64(
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i64_nxv2i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i1.nxv2i64(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64> %1,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i1.nxv4i64(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i64_nxv4i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i1.nxv4i64(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64> %1,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i1.nxv8i64(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i64_nxv8i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i1.nxv8i64(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i64> %1,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i1.i8(
+  <vscale x 1 x i8>,
+  i8,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i1.i8(
+    <vscale x 1 x i8> %0,
+    i8 %1,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i1.i8(
+  <vscale x 2 x i8>,
+  i8,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i1.i8(
+    <vscale x 2 x i8> %0,
+    i8 %1,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i1.i8(
+  <vscale x 4 x i8>,
+  i8,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i1.i8(
+    <vscale x 4 x i8> %0,
+    i8 %1,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i1.i8(
+  <vscale x 8 x i8>,
+  i8,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i1.i8(
+    <vscale x 8 x i8> %0,
+    i8 %1,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i1.i8(
+  <vscale x 16 x i8>,
+  i8,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i1.i8(
+    <vscale x 16 x i8> %0,
+    i8 %1,
+    <vscale x 16 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i1.i8(
+  <vscale x 32 x i8>,
+  i8,
+  <vscale x 32 x i1>,
+  i64);
+
+define <vscale x 32 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv32i1_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv32i1_nxv32i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i1.i8(
+    <vscale x 32 x i8> %0,
+    i8 %1,
+    <vscale x 32 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 64 x i1> @llvm.riscv.vmsbc.borrow.in.nxv64i1.i8(
+  <vscale x 64 x i8>,
+  i8,
+  <vscale x 64 x i1>,
+  i64);
+
+define <vscale x 64 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv64i1_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, <vscale x 64 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv64i1_nxv64i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 64 x i1> @llvm.riscv.vmsbc.borrow.in.nxv64i1.i8(
+    <vscale x 64 x i8> %0,
+    i8 %1,
+    <vscale x 64 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 64 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i1.i16(
+  <vscale x 1 x i16>,
+  i16,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i1.i16(
+    <vscale x 1 x i16> %0,
+    i16 %1,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i1.i16(
+  <vscale x 2 x i16>,
+  i16,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i1.i16(
+    <vscale x 2 x i16> %0,
+    i16 %1,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i1.i16(
+  <vscale x 4 x i16>,
+  i16,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i1.i16(
+    <vscale x 4 x i16> %0,
+    i16 %1,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i1.i16(
+  <vscale x 8 x i16>,
+  i16,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i1.i16(
+    <vscale x 8 x i16> %0,
+    i16 %1,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i1.i16(
+  <vscale x 16 x i16>,
+  i16,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i1.i16(
+    <vscale x 16 x i16> %0,
+    i16 %1,
+    <vscale x 16 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i1.i16(
+  <vscale x 32 x i16>,
+  i16,
+  <vscale x 32 x i1>,
+  i64);
+
+define <vscale x 32 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv32i1_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv32i1_nxv32i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i1.i16(
+    <vscale x 32 x i16> %0,
+    i16 %1,
+    <vscale x 32 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i1.i32(
+  <vscale x 1 x i32>,
+  i32,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i1.i32(
+    <vscale x 1 x i32> %0,
+    i32 %1,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i1.i32(
+  <vscale x 2 x i32>,
+  i32,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i1.i32(
+    <vscale x 2 x i32> %0,
+    i32 %1,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i1.i32(
+  <vscale x 4 x i32>,
+  i32,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i1.i32(
+    <vscale x 4 x i32> %0,
+    i32 %1,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i1.i32(
+  <vscale x 8 x i32>,
+  i32,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i1.i32(
+    <vscale x 8 x i32> %0,
+    i32 %1,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i1.i32(
+  <vscale x 16 x i32>,
+  i32,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i1.i32(
+    <vscale x 16 x i32> %0,
+    i32 %1,
+    <vscale x 16 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i1.i64(
+  <vscale x 1 x i64>,
+  i64,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i1.i64(
+    <vscale x 1 x i64> %0,
+    i64 %1,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i1.i64(
+  <vscale x 2 x i64>,
+  i64,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i1.i64(
+    <vscale x 2 x i64> %0,
+    i64 %1,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i1.i64(
+  <vscale x 4 x i64>,
+  i64,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i1.i64(
+    <vscale x 4 x i64> %0,
+    i64 %1,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i1.i64(
+  <vscale x 8 x i64>,
+  i64,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i1.i64(
+    <vscale x 8 x i64> %0,
+    i64 %1,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i1> %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vsbc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsbc-rv32.ll
new file mode 100644
index 000000000000..71efe008d445
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vsbc-rv32.ll
@@ -0,0 +1,721 @@
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i8> @llvm.riscv.vsbc.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i8> @intrinsic_vsbc_vvm_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vvm_nxv1i8_nxv1i8_nxv1i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 1 x i8> @llvm.riscv.vsbc.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vsbc.nxv2i8.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i8> @intrinsic_vsbc_vvm_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vvm_nxv2i8_nxv2i8_nxv2i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 2 x i8> @llvm.riscv.vsbc.nxv2i8.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vsbc.nxv4i8.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i8> @intrinsic_vsbc_vvm_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vvm_nxv4i8_nxv4i8_nxv4i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 4 x i8> @llvm.riscv.vsbc.nxv4i8.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vsbc.nxv8i8.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i8> @intrinsic_vsbc_vvm_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vvm_nxv8i8_nxv8i8_nxv8i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 8 x i8> @llvm.riscv.vsbc.nxv8i8.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vsbc.nxv16i8.nxv16i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i8> @intrinsic_vsbc_vvm_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vvm_nxv16i8_nxv16i8_nxv16i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 16 x i8> @llvm.riscv.vsbc.nxv16i8.nxv16i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    <vscale x 16 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vsbc.nxv32i8.nxv32i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  <vscale x 32 x i1>,
+  i32);
+
+define <vscale x 32 x i8> @intrinsic_vsbc_vvm_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vvm_nxv32i8_nxv32i8_nxv32i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 32 x i8> @llvm.riscv.vsbc.nxv32i8.nxv32i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8> %1,
+    <vscale x 32 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vsbc.nxv64i8.nxv64i8(
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  <vscale x 64 x i1>,
+  i32);
+
+define <vscale x 64 x i8> @intrinsic_vsbc_vvm_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vvm_nxv64i8_nxv64i8_nxv64i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 64 x i8> @llvm.riscv.vsbc.nxv64i8.nxv64i8(
+    <vscale x 64 x i8> %0,
+    <vscale x 64 x i8> %1,
+    <vscale x 64 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vsbc.nxv1i16.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i16> @intrinsic_vsbc_vvm_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vvm_nxv1i16_nxv1i16_nxv1i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 1 x i16> @llvm.riscv.vsbc.nxv1i16.nxv1i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vsbc.nxv2i16.nxv2i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i16> @intrinsic_vsbc_vvm_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vvm_nxv2i16_nxv2i16_nxv2i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 2 x i16> @llvm.riscv.vsbc.nxv2i16.nxv2i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %1,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vsbc.nxv4i16.nxv4i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i16> @intrinsic_vsbc_vvm_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vvm_nxv4i16_nxv4i16_nxv4i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 4 x i16> @llvm.riscv.vsbc.nxv4i16.nxv4i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %1,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vsbc.nxv8i16.nxv8i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i16> @intrinsic_vsbc_vvm_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vvm_nxv8i16_nxv8i16_nxv8i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 8 x i16> @llvm.riscv.vsbc.nxv8i16.nxv8i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %1,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vsbc.nxv16i16.nxv16i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i16> @intrinsic_vsbc_vvm_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vvm_nxv16i16_nxv16i16_nxv16i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 16 x i16> @llvm.riscv.vsbc.nxv16i16.nxv16i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %1,
+    <vscale x 16 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vsbc.nxv32i16.nxv32i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  <vscale x 32 x i1>,
+  i32);
+
+define <vscale x 32 x i16> @intrinsic_vsbc_vvm_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vvm_nxv32i16_nxv32i16_nxv32i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 32 x i16> @llvm.riscv.vsbc.nxv32i16.nxv32i16(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16> %1,
+    <vscale x 32 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vsbc.nxv1i32.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i32> @intrinsic_vsbc_vvm_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vvm_nxv1i32_nxv1i32_nxv1i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 1 x i32> @llvm.riscv.vsbc.nxv1i32.nxv1i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vsbc.nxv2i32.nxv2i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i32> @intrinsic_vsbc_vvm_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vvm_nxv2i32_nxv2i32_nxv2i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 2 x i32> @llvm.riscv.vsbc.nxv2i32.nxv2i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vsbc.nxv4i32.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i32> @intrinsic_vsbc_vvm_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vvm_nxv4i32_nxv4i32_nxv4i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 4 x i32> @llvm.riscv.vsbc.nxv4i32.nxv4i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vsbc.nxv8i32.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i32> @intrinsic_vsbc_vvm_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vvm_nxv8i32_nxv8i32_nxv8i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 8 x i32> @llvm.riscv.vsbc.nxv8i32.nxv8i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vsbc.nxv16i32.nxv16i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i32> @intrinsic_vsbc_vvm_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vvm_nxv16i32_nxv16i32_nxv16i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 16 x i32> @llvm.riscv.vsbc.nxv16i32.nxv16i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    <vscale x 16 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vsbc.nxv1i8.i8(
+  <vscale x 1 x i8>,
+  i8,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i8> @intrinsic_vsbc_vxm_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vxm_nxv1i8_nxv1i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 1 x i8> @llvm.riscv.vsbc.nxv1i8.i8(
+    <vscale x 1 x i8> %0,
+    i8 %1,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vsbc.nxv2i8.i8(
+  <vscale x 2 x i8>,
+  i8,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i8> @intrinsic_vsbc_vxm_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vxm_nxv2i8_nxv2i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 2 x i8> @llvm.riscv.vsbc.nxv2i8.i8(
+    <vscale x 2 x i8> %0,
+    i8 %1,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vsbc.nxv4i8.i8(
+  <vscale x 4 x i8>,
+  i8,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i8> @intrinsic_vsbc_vxm_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vxm_nxv4i8_nxv4i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 4 x i8> @llvm.riscv.vsbc.nxv4i8.i8(
+    <vscale x 4 x i8> %0,
+    i8 %1,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vsbc.nxv8i8.i8(
+  <vscale x 8 x i8>,
+  i8,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i8> @intrinsic_vsbc_vxm_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vxm_nxv8i8_nxv8i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 8 x i8> @llvm.riscv.vsbc.nxv8i8.i8(
+    <vscale x 8 x i8> %0,
+    i8 %1,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vsbc.nxv16i8.i8(
+  <vscale x 16 x i8>,
+  i8,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i8> @intrinsic_vsbc_vxm_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vxm_nxv16i8_nxv16i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 16 x i8> @llvm.riscv.vsbc.nxv16i8.i8(
+    <vscale x 16 x i8> %0,
+    i8 %1,
+    <vscale x 16 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vsbc.nxv32i8.i8(
+  <vscale x 32 x i8>,
+  i8,
+  <vscale x 32 x i1>,
+  i32);
+
+define <vscale x 32 x i8> @intrinsic_vsbc_vxm_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vxm_nxv32i8_nxv32i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 32 x i8> @llvm.riscv.vsbc.nxv32i8.i8(
+    <vscale x 32 x i8> %0,
+    i8 %1,
+    <vscale x 32 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vsbc.nxv64i8.i8(
+  <vscale x 64 x i8>,
+  i8,
+  <vscale x 64 x i1>,
+  i32);
+
+define <vscale x 64 x i8> @intrinsic_vsbc_vxm_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, <vscale x 64 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vxm_nxv64i8_nxv64i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 64 x i8> @llvm.riscv.vsbc.nxv64i8.i8(
+    <vscale x 64 x i8> %0,
+    i8 %1,
+    <vscale x 64 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vsbc.nxv1i16.i16(
+  <vscale x 1 x i16>,
+  i16,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i16> @intrinsic_vsbc_vxm_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vxm_nxv1i16_nxv1i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 1 x i16> @llvm.riscv.vsbc.nxv1i16.i16(
+    <vscale x 1 x i16> %0,
+    i16 %1,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vsbc.nxv2i16.i16(
+  <vscale x 2 x i16>,
+  i16,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i16> @intrinsic_vsbc_vxm_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vxm_nxv2i16_nxv2i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 2 x i16> @llvm.riscv.vsbc.nxv2i16.i16(
+    <vscale x 2 x i16> %0,
+    i16 %1,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vsbc.nxv4i16.i16(
+  <vscale x 4 x i16>,
+  i16,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i16> @intrinsic_vsbc_vxm_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vxm_nxv4i16_nxv4i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 4 x i16> @llvm.riscv.vsbc.nxv4i16.i16(
+    <vscale x 4 x i16> %0,
+    i16 %1,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vsbc.nxv8i16.i16(
+  <vscale x 8 x i16>,
+  i16,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i16> @intrinsic_vsbc_vxm_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vxm_nxv8i16_nxv8i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 8 x i16> @llvm.riscv.vsbc.nxv8i16.i16(
+    <vscale x 8 x i16> %0,
+    i16 %1,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vsbc.nxv16i16.i16(
+  <vscale x 16 x i16>,
+  i16,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i16> @intrinsic_vsbc_vxm_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vxm_nxv16i16_nxv16i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 16 x i16> @llvm.riscv.vsbc.nxv16i16.i16(
+    <vscale x 16 x i16> %0,
+    i16 %1,
+    <vscale x 16 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vsbc.nxv32i16.i16(
+  <vscale x 32 x i16>,
+  i16,
+  <vscale x 32 x i1>,
+  i32);
+
+define <vscale x 32 x i16> @intrinsic_vsbc_vxm_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vxm_nxv32i16_nxv32i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 32 x i16> @llvm.riscv.vsbc.nxv32i16.i16(
+    <vscale x 32 x i16> %0,
+    i16 %1,
+    <vscale x 32 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vsbc.nxv1i32.i32(
+  <vscale x 1 x i32>,
+  i32,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i32> @intrinsic_vsbc_vxm_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vxm_nxv1i32_nxv1i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 1 x i32> @llvm.riscv.vsbc.nxv1i32.i32(
+    <vscale x 1 x i32> %0,
+    i32 %1,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vsbc.nxv2i32.i32(
+  <vscale x 2 x i32>,
+  i32,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i32> @intrinsic_vsbc_vxm_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vxm_nxv2i32_nxv2i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 2 x i32> @llvm.riscv.vsbc.nxv2i32.i32(
+    <vscale x 2 x i32> %0,
+    i32 %1,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vsbc.nxv4i32.i32(
+  <vscale x 4 x i32>,
+  i32,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i32> @intrinsic_vsbc_vxm_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vxm_nxv4i32_nxv4i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 4 x i32> @llvm.riscv.vsbc.nxv4i32.i32(
+    <vscale x 4 x i32> %0,
+    i32 %1,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vsbc.nxv8i32.i32(
+  <vscale x 8 x i32>,
+  i32,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i32> @intrinsic_vsbc_vxm_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vxm_nxv8i32_nxv8i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 8 x i32> @llvm.riscv.vsbc.nxv8i32.i32(
+    <vscale x 8 x i32> %0,
+    i32 %1,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vsbc.nxv16i32.i32(
+  <vscale x 16 x i32>,
+  i32,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i32> @intrinsic_vsbc_vxm_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vxm_nxv16i32_nxv16i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 16 x i32> @llvm.riscv.vsbc.nxv16i32.i32(
+    <vscale x 16 x i32> %0,
+    i32 %1,
+    <vscale x 16 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 16 x i32> %a
+}

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vsbc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsbc-rv64.ll
new file mode 100644
index 000000000000..9f79026a2f95
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vsbc-rv64.ll
@@ -0,0 +1,881 @@
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i8> @llvm.riscv.vsbc.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i8> @intrinsic_vsbc_vvm_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vvm_nxv1i8_nxv1i8_nxv1i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 1 x i8> @llvm.riscv.vsbc.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vsbc.nxv2i8.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i8> @intrinsic_vsbc_vvm_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vvm_nxv2i8_nxv2i8_nxv2i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 2 x i8> @llvm.riscv.vsbc.nxv2i8.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vsbc.nxv4i8.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i8> @intrinsic_vsbc_vvm_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vvm_nxv4i8_nxv4i8_nxv4i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 4 x i8> @llvm.riscv.vsbc.nxv4i8.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vsbc.nxv8i8.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i8> @intrinsic_vsbc_vvm_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vvm_nxv8i8_nxv8i8_nxv8i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 8 x i8> @llvm.riscv.vsbc.nxv8i8.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vsbc.nxv16i8.nxv16i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i8> @intrinsic_vsbc_vvm_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vvm_nxv16i8_nxv16i8_nxv16i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 16 x i8> @llvm.riscv.vsbc.nxv16i8.nxv16i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    <vscale x 16 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vsbc.nxv32i8.nxv32i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  <vscale x 32 x i1>,
+  i64);
+
+define <vscale x 32 x i8> @intrinsic_vsbc_vvm_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vvm_nxv32i8_nxv32i8_nxv32i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 32 x i8> @llvm.riscv.vsbc.nxv32i8.nxv32i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8> %1,
+    <vscale x 32 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vsbc.nxv64i8.nxv64i8(
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  <vscale x 64 x i1>,
+  i64);
+
+define <vscale x 64 x i8> @intrinsic_vsbc_vvm_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vvm_nxv64i8_nxv64i8_nxv64i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 64 x i8> @llvm.riscv.vsbc.nxv64i8.nxv64i8(
+    <vscale x 64 x i8> %0,
+    <vscale x 64 x i8> %1,
+    <vscale x 64 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vsbc.nxv1i16.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i16> @intrinsic_vsbc_vvm_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vvm_nxv1i16_nxv1i16_nxv1i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 1 x i16> @llvm.riscv.vsbc.nxv1i16.nxv1i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vsbc.nxv2i16.nxv2i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i16> @intrinsic_vsbc_vvm_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vvm_nxv2i16_nxv2i16_nxv2i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 2 x i16> @llvm.riscv.vsbc.nxv2i16.nxv2i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %1,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vsbc.nxv4i16.nxv4i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i16> @intrinsic_vsbc_vvm_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vvm_nxv4i16_nxv4i16_nxv4i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 4 x i16> @llvm.riscv.vsbc.nxv4i16.nxv4i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %1,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vsbc.nxv8i16.nxv8i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i16> @intrinsic_vsbc_vvm_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vvm_nxv8i16_nxv8i16_nxv8i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 8 x i16> @llvm.riscv.vsbc.nxv8i16.nxv8i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %1,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vsbc.nxv16i16.nxv16i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i16> @intrinsic_vsbc_vvm_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vvm_nxv16i16_nxv16i16_nxv16i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 16 x i16> @llvm.riscv.vsbc.nxv16i16.nxv16i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %1,
+    <vscale x 16 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vsbc.nxv32i16.nxv32i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  <vscale x 32 x i1>,
+  i64);
+
+define <vscale x 32 x i16> @intrinsic_vsbc_vvm_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vvm_nxv32i16_nxv32i16_nxv32i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 32 x i16> @llvm.riscv.vsbc.nxv32i16.nxv32i16(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16> %1,
+    <vscale x 32 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vsbc.nxv1i32.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i32> @intrinsic_vsbc_vvm_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vvm_nxv1i32_nxv1i32_nxv1i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 1 x i32> @llvm.riscv.vsbc.nxv1i32.nxv1i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vsbc.nxv2i32.nxv2i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i32> @intrinsic_vsbc_vvm_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vvm_nxv2i32_nxv2i32_nxv2i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 2 x i32> @llvm.riscv.vsbc.nxv2i32.nxv2i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vsbc.nxv4i32.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i32> @intrinsic_vsbc_vvm_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vvm_nxv4i32_nxv4i32_nxv4i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 4 x i32> @llvm.riscv.vsbc.nxv4i32.nxv4i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vsbc.nxv8i32.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i32> @intrinsic_vsbc_vvm_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vvm_nxv8i32_nxv8i32_nxv8i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 8 x i32> @llvm.riscv.vsbc.nxv8i32.nxv8i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vsbc.nxv16i32.nxv16i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i32> @intrinsic_vsbc_vvm_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vvm_nxv16i32_nxv16i32_nxv16i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 16 x i32> @llvm.riscv.vsbc.nxv16i32.nxv16i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    <vscale x 16 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vsbc.nxv1i64.nxv1i64(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i64> @intrinsic_vsbc_vvm_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vvm_nxv1i64_nxv1i64_nxv1i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 1 x i64> @llvm.riscv.vsbc.nxv1i64.nxv1i64(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64> %1,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vsbc.nxv2i64.nxv2i64(
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i64> @intrinsic_vsbc_vvm_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vvm_nxv2i64_nxv2i64_nxv2i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 2 x i64> @llvm.riscv.vsbc.nxv2i64.nxv2i64(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64> %1,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vsbc.nxv4i64.nxv4i64(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i64> @intrinsic_vsbc_vvm_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vvm_nxv4i64_nxv4i64_nxv4i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 4 x i64> @llvm.riscv.vsbc.nxv4i64.nxv4i64(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64> %1,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vsbc.nxv8i64.nxv8i64(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i64> @intrinsic_vsbc_vvm_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vvm_nxv8i64_nxv8i64_nxv8i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 8 x i64> @llvm.riscv.vsbc.nxv8i64.nxv8i64(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i64> %1,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vsbc.nxv1i8.i8(
+  <vscale x 1 x i8>,
+  i8,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i8> @intrinsic_vsbc_vxm_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vxm_nxv1i8_nxv1i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 1 x i8> @llvm.riscv.vsbc.nxv1i8.i8(
+    <vscale x 1 x i8> %0,
+    i8 %1,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vsbc.nxv2i8.i8(
+  <vscale x 2 x i8>,
+  i8,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i8> @intrinsic_vsbc_vxm_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vxm_nxv2i8_nxv2i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 2 x i8> @llvm.riscv.vsbc.nxv2i8.i8(
+    <vscale x 2 x i8> %0,
+    i8 %1,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vsbc.nxv4i8.i8(
+  <vscale x 4 x i8>,
+  i8,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i8> @intrinsic_vsbc_vxm_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vxm_nxv4i8_nxv4i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 4 x i8> @llvm.riscv.vsbc.nxv4i8.i8(
+    <vscale x 4 x i8> %0,
+    i8 %1,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vsbc.nxv8i8.i8(
+  <vscale x 8 x i8>,
+  i8,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i8> @intrinsic_vsbc_vxm_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vxm_nxv8i8_nxv8i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 8 x i8> @llvm.riscv.vsbc.nxv8i8.i8(
+    <vscale x 8 x i8> %0,
+    i8 %1,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vsbc.nxv16i8.i8(
+  <vscale x 16 x i8>,
+  i8,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i8> @intrinsic_vsbc_vxm_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vxm_nxv16i8_nxv16i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 16 x i8> @llvm.riscv.vsbc.nxv16i8.i8(
+    <vscale x 16 x i8> %0,
+    i8 %1,
+    <vscale x 16 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vsbc.nxv32i8.i8(
+  <vscale x 32 x i8>,
+  i8,
+  <vscale x 32 x i1>,
+  i64);
+
+define <vscale x 32 x i8> @intrinsic_vsbc_vxm_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vxm_nxv32i8_nxv32i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 32 x i8> @llvm.riscv.vsbc.nxv32i8.i8(
+    <vscale x 32 x i8> %0,
+    i8 %1,
+    <vscale x 32 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vsbc.nxv64i8.i8(
+  <vscale x 64 x i8>,
+  i8,
+  <vscale x 64 x i1>,
+  i64);
+
+define <vscale x 64 x i8> @intrinsic_vsbc_vxm_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, <vscale x 64 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vxm_nxv64i8_nxv64i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 64 x i8> @llvm.riscv.vsbc.nxv64i8.i8(
+    <vscale x 64 x i8> %0,
+    i8 %1,
+    <vscale x 64 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vsbc.nxv1i16.i16(
+  <vscale x 1 x i16>,
+  i16,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i16> @intrinsic_vsbc_vxm_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vxm_nxv1i16_nxv1i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 1 x i16> @llvm.riscv.vsbc.nxv1i16.i16(
+    <vscale x 1 x i16> %0,
+    i16 %1,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vsbc.nxv2i16.i16(
+  <vscale x 2 x i16>,
+  i16,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i16> @intrinsic_vsbc_vxm_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vxm_nxv2i16_nxv2i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 2 x i16> @llvm.riscv.vsbc.nxv2i16.i16(
+    <vscale x 2 x i16> %0,
+    i16 %1,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vsbc.nxv4i16.i16(
+  <vscale x 4 x i16>,
+  i16,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i16> @intrinsic_vsbc_vxm_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vxm_nxv4i16_nxv4i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 4 x i16> @llvm.riscv.vsbc.nxv4i16.i16(
+    <vscale x 4 x i16> %0,
+    i16 %1,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vsbc.nxv8i16.i16(
+  <vscale x 8 x i16>,
+  i16,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i16> @intrinsic_vsbc_vxm_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vxm_nxv8i16_nxv8i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 8 x i16> @llvm.riscv.vsbc.nxv8i16.i16(
+    <vscale x 8 x i16> %0,
+    i16 %1,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vsbc.nxv16i16.i16(
+  <vscale x 16 x i16>,
+  i16,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i16> @intrinsic_vsbc_vxm_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vxm_nxv16i16_nxv16i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 16 x i16> @llvm.riscv.vsbc.nxv16i16.i16(
+    <vscale x 16 x i16> %0,
+    i16 %1,
+    <vscale x 16 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vsbc.nxv32i16.i16(
+  <vscale x 32 x i16>,
+  i16,
+  <vscale x 32 x i1>,
+  i64);
+
+define <vscale x 32 x i16> @intrinsic_vsbc_vxm_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vxm_nxv32i16_nxv32i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK:       vsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 32 x i16> @llvm.riscv.vsbc.nxv32i16.i16(
+    <vscale x 32 x i16> %0,
+    i16 %1,
+    <vscale x 32 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vsbc.nxv1i32.i32(
+  <vscale x 1 x i32>,
+  i32,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i32> @intrinsic_vsbc_vxm_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vxm_nxv1i32_nxv1i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 1 x i32> @llvm.riscv.vsbc.nxv1i32.i32(
+    <vscale x 1 x i32> %0,
+    i32 %1,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vsbc.nxv2i32.i32(
+  <vscale x 2 x i32>,
+  i32,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i32> @intrinsic_vsbc_vxm_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vxm_nxv2i32_nxv2i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 2 x i32> @llvm.riscv.vsbc.nxv2i32.i32(
+    <vscale x 2 x i32> %0,
+    i32 %1,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vsbc.nxv4i32.i32(
+  <vscale x 4 x i32>,
+  i32,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i32> @intrinsic_vsbc_vxm_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vxm_nxv4i32_nxv4i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 4 x i32> @llvm.riscv.vsbc.nxv4i32.i32(
+    <vscale x 4 x i32> %0,
+    i32 %1,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vsbc.nxv8i32.i32(
+  <vscale x 8 x i32>,
+  i32,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i32> @intrinsic_vsbc_vxm_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vxm_nxv8i32_nxv8i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 8 x i32> @llvm.riscv.vsbc.nxv8i32.i32(
+    <vscale x 8 x i32> %0,
+    i32 %1,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vsbc.nxv16i32.i32(
+  <vscale x 16 x i32>,
+  i32,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i32> @intrinsic_vsbc_vxm_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vxm_nxv16i32_nxv16i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK:       vsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 16 x i32> @llvm.riscv.vsbc.nxv16i32.i32(
+    <vscale x 16 x i32> %0,
+    i32 %1,
+    <vscale x 16 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vsbc.nxv1i64.i64(
+  <vscale x 1 x i64>,
+  i64,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i64> @intrinsic_vsbc_vxm_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vxm_nxv1i64_nxv1i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 1 x i64> @llvm.riscv.vsbc.nxv1i64.i64(
+    <vscale x 1 x i64> %0,
+    i64 %1,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vsbc.nxv2i64.i64(
+  <vscale x 2 x i64>,
+  i64,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i64> @intrinsic_vsbc_vxm_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vxm_nxv2i64_nxv2i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 2 x i64> @llvm.riscv.vsbc.nxv2i64.i64(
+    <vscale x 2 x i64> %0,
+    i64 %1,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vsbc.nxv4i64.i64(
+  <vscale x 4 x i64>,
+  i64,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i64> @intrinsic_vsbc_vxm_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vxm_nxv4i64_nxv4i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 4 x i64> @llvm.riscv.vsbc.nxv4i64.i64(
+    <vscale x 4 x i64> %0,
+    i64 %1,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vsbc.nxv8i64.i64(
+  <vscale x 8 x i64>,
+  i64,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i64> @intrinsic_vsbc_vxm_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vxm_nxv8i64_nxv8i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK:       vsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 8 x i64> @llvm.riscv.vsbc.nxv8i64.i64(
+    <vscale x 8 x i64> %0,
+    i64 %1,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i64> %a
+}
