[llvm] a6805a0 - [RISCV] Define vadd/vsub/vrsub intrinsics and lower to V instructions.
Hsiangkai Wang via llvm-commits
llvm-commits at lists.llvm.org
Mon Dec 14 21:00:00 PST 2020
Author: Hsiangkai Wang
Date: 2020-12-15T12:56:49+08:00
New Revision: a6805a0e02c98c1065038c0c75a25c45ee29b932
URL: https://github.com/llvm/llvm-project/commit/a6805a0e02c98c1065038c0c75a25c45ee29b932
DIFF: https://github.com/llvm/llvm-project/commit/a6805a0e02c98c1065038c0c75a25c45ee29b932.diff
LOG: [RISCV] Define vadd/vsub/vrsub intrinsics and lower to V instructions.
This patch is based on the proposal from Roger Ferrer Ibanez.
http://lists.llvm.org/pipermail/llvm-dev/2020-October/145850.html
Differential Revision: https://reviews.llvm.org/D93013
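For reference, the unmasked intrinsics take the source operands plus an
explicit vector-length operand, and the masked variants additionally take a
maskedoff (merge) operand and a mask. A minimal IR sketch, following the forms
exercised by the added vadd-rv32.ll test (the function name is illustrative):

  declare <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
    <vscale x 1 x i8>, <vscale x 1 x i8>, i32)

  define <vscale x 1 x i8> @example_vadd_vv(<vscale x 1 x i8> %a,
                                            <vscale x 1 x i8> %b,
                                            i32 %vl) nounwind {
    ; Selects the VADD vector-vector pseudo; codegen emits a vsetvli with
    ; e8,mf8 followed by vadd.vv, as checked in the new tests.
    %r = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
      <vscale x 1 x i8> %a, <vscale x 1 x i8> %b, i32 %vl)
    ret <vscale x 1 x i8> %r
  }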
Added:
llvm/test/CodeGen/RISCV/rvv/vadd-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vadd-rv64.ll
llvm/test/CodeGen/RISCV/rvv/vrsub-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vrsub-rv64.ll
llvm/test/CodeGen/RISCV/rvv/vsub-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vsub-rv64.ll
Modified:
llvm/include/llvm/IR/IntrinsicsRISCV.td
llvm/lib/Target/RISCV/RISCVISelLowering.cpp
llvm/lib/Target/RISCV/RISCVISelLowering.h
llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
llvm/lib/Target/RISCV/RISCVMCInstLower.cpp
llvm/lib/Target/RISCV/RISCVRegisterInfo.td
llvm/lib/Target/RISCV/Utils/RISCVBaseInfo.h
Removed:
################################################################################
diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
index 7590b568c367..9450875dece0 100644
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -66,3 +66,43 @@ let TargetPrefix = "riscv" in {
defm int_riscv_masked_cmpxchg : MaskedAtomicRMWFiveArgIntrinsics;
} // TargetPrefix = "riscv"
+
+//===----------------------------------------------------------------------===//
+// Vectors
+
+class RISCVVIntrinsic {
+ // These intrinsics may accept illegal integer values in their llvm_any_ty
+ // operand, so they have to be extended. If set to zero then the intrinsic
+ // does not have any operand that must be extended.
+ Intrinsic IntrinsicID = !cast<Intrinsic>(NAME);
+ bits<4> ExtendOperand = 0;
+}
+
+let TargetPrefix = "riscv" in {
+ // For intrinsics where the destination vector type is the same as the first source vector.
+ // Input: (vector_in, vector_in/scalar_in, vl)
+ class RISCVBinaryAAXNoMask
+ : Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, llvm_any_ty, llvm_anyint_ty],
+ [IntrNoMem]>, RISCVVIntrinsic {
+ let ExtendOperand = 2;
+ }
+ // For intrinsics where the destination vector type is the same as the first source vector (with mask).
+ // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl)
+ class RISCVBinaryAAXMask
+ : Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
+ llvm_anyvector_ty, llvm_anyint_ty],
+ [IntrNoMem]>, RISCVVIntrinsic {
+ let ExtendOperand = 3;
+ }
+
+ multiclass RISCVBinaryAAX {
+ def "int_riscv_" # NAME : RISCVBinaryAAXNoMask;
+ def "int_riscv_" # NAME # "_mask" : RISCVBinaryAAXMask;
+ }
+
+ defm vadd : RISCVBinaryAAX;
+ defm vsub : RISCVBinaryAAX;
+ defm vrsub : RISCVBinaryAAX;
+} // TargetPrefix = "riscv"
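As a concrete illustration of ExtendOperand: when the vector_in/scalar_in
operand is a scalar narrower than XLEN, the lowering change below any-extends
it to XLenVT before instruction selection. A minimal sketch based on the
scalar-operand declarations in the added vadd-rv32.ll test (the function name
is illustrative):

  declare <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.i8(
    <vscale x 1 x i8>, i8, i32)

  define <vscale x 1 x i8> @example_vadd_vx(<vscale x 1 x i8> %v, i8 %s,
                                            i32 %vl) nounwind {
    ; On RV32, the i8 scalar %s is an illegal type; LowerINTRINSIC_WO_CHAIN
    ; any-extends it to i32 (XLEN) so the VX pattern can match vadd.vx.
    %r = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.i8(
      <vscale x 1 x i8> %v, i8 %s, i32 %vl)
    ret <vscale x 1 x i8> %r
  }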
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index d7496b3ac7c9..a6aa81be1e40 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -331,8 +331,12 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
setBooleanContents(ZeroOrOneBooleanContent);
- if (Subtarget.hasStdExtV())
+ if (Subtarget.hasStdExtV()) {
setBooleanVectorContents(ZeroOrOneBooleanContent);
+ // RVV intrinsics may have illegal operands.
+ for (auto VT : {MVT::i8, MVT::i16, MVT::i32})
+ setOperationAction(ISD::INTRINSIC_WO_CHAIN, VT, Custom);
+ }
// Function alignments.
const Align FunctionAlignment(Subtarget.hasStdExtC() ? 2 : 4);
@@ -1002,6 +1006,28 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
SelectionDAG &DAG) const {
unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
SDLoc DL(Op);
+
+ if (Subtarget.hasStdExtV()) {
+ // Some RVV intrinsics may claim that they want an integer operand to be
+ // extended.
+ if (const RISCVVIntrinsicsTable::RISCVVIntrinsicInfo *II =
+ RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntNo)) {
+ if (II->ExtendedOperand) {
+ assert(II->ExtendedOperand < Op.getNumOperands());
+ SmallVector<SDValue, 8> Operands(Op->op_begin(), Op->op_end());
+ SDValue &ScalarOp = Operands[II->ExtendedOperand];
+ if (ScalarOp.getValueType() == MVT::i8 ||
+ ScalarOp.getValueType() == MVT::i16 ||
+ ScalarOp.getValueType() == MVT::i32) {
+ ScalarOp =
+ DAG.getNode(ISD::ANY_EXTEND, DL, Subtarget.getXLenVT(), ScalarOp);
+ return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, Op.getValueType(),
+ Operands);
+ }
+ }
+ }
+ }
+
switch (IntNo) {
default:
return SDValue(); // Don't custom lower most intrinsics.
@@ -2038,6 +2064,16 @@ static const MCPhysReg ArgFPR64s[] = {
RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D,
RISCV::F14_D, RISCV::F15_D, RISCV::F16_D, RISCV::F17_D
};
+// This is an interim calling convention and it may be changed in the future.
+static const MCPhysReg ArgVRs[] = {
+ RISCV::V16, RISCV::V17, RISCV::V18, RISCV::V19, RISCV::V20,
+ RISCV::V21, RISCV::V22, RISCV::V23
+};
+static const MCPhysReg ArgVRM2s[] = {
+ RISCV::V16M2, RISCV::V18M2, RISCV::V20M2, RISCV::V22M2
+};
+static const MCPhysReg ArgVRM4s[] = {RISCV::V16M4, RISCV::V20M4};
+static const MCPhysReg ArgVRM8s[] = {RISCV::V16M8};
// Pass a 2*XLEN argument that has been split into two XLEN values through
// registers or the stack as necessary.
@@ -2082,7 +2118,8 @@ static bool CC_RISCVAssign2XLen(unsigned XLen, CCState &State, CCValAssign VA1,
static bool CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,
MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo,
ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsFixed,
- bool IsRet, Type *OrigTy) {
+ bool IsRet, Type *OrigTy, const RISCVTargetLowering &TLI,
+ Optional<unsigned> FirstMaskArgument) {
unsigned XLen = DL.getLargestLegalIntTypeSizeInBits();
assert(XLen == 32 || XLen == 64);
MVT XLenVT = XLen == 32 ? MVT::i32 : MVT::i64;
@@ -2215,7 +2252,34 @@ static bool CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,
Reg = State.AllocateReg(ArgFPR32s);
else if (ValVT == MVT::f64 && !UseGPRForF64)
Reg = State.AllocateReg(ArgFPR64s);
- else
+ else if (ValVT.isScalableVector()) {
+ const TargetRegisterClass *RC = TLI.getRegClassFor(ValVT);
+ if (RC == &RISCV::VRRegClass) {
+ // Assign the first mask argument to V0.
+ // This is an interim calling convention and it may be changed in the
+ // future.
+ if (FirstMaskArgument.hasValue() &&
+ ValNo == FirstMaskArgument.getValue()) {
+ Reg = State.AllocateReg(RISCV::V0);
+ } else {
+ Reg = State.AllocateReg(ArgVRs);
+ }
+ } else if (RC == &RISCV::VRM2RegClass) {
+ Reg = State.AllocateReg(ArgVRM2s);
+ } else if (RC == &RISCV::VRM4RegClass) {
+ Reg = State.AllocateReg(ArgVRM4s);
+ } else if (RC == &RISCV::VRM8RegClass) {
+ Reg = State.AllocateReg(ArgVRM8s);
+ } else {
+ llvm_unreachable("Unhandled class register for ValueType");
+ }
+ if (!Reg) {
+ LocInfo = CCValAssign::Indirect;
+ // Try using a GPR to pass the address
+ Reg = State.AllocateReg(ArgGPRs);
+ LocVT = XLenVT;
+ }
+ } else
Reg = State.AllocateReg(ArgGPRs);
unsigned StackOffset =
Reg ? 0 : State.AllocateStack(XLen / 8, Align(XLen / 8));
@@ -2238,8 +2302,9 @@ static bool CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,
return false;
}
- assert((!UseGPRForF16_F32 || !UseGPRForF64 || LocVT == XLenVT) &&
- "Expected an XLenVT at this stage");
+ assert((!UseGPRForF16_F32 || !UseGPRForF64 || LocVT == XLenVT ||
+ (TLI.getSubtarget().hasStdExtV() && ValVT.isScalableVector())) &&
+ "Expected an XLenVT or scalable vector types at this stage");
if (Reg) {
State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
@@ -2256,12 +2321,32 @@ static bool CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,
return false;
}
+template <typename ArgTy>
+static void preAssignMask(const ArgTy &Args,
+ Optional<unsigned> &FirstMaskArgument,
+ CCState &CCInfo) {
+ unsigned NumArgs = Args.size();
+ for (unsigned I = 0; I != NumArgs; ++I) {
+ MVT ArgVT = Args[I].VT;
+ if (!ArgVT.isScalableVector() ||
+ ArgVT.getVectorElementType().SimpleTy != MVT::i1)
+ continue;
+
+ FirstMaskArgument = I;
+ break;
+ }
+}
+
void RISCVTargetLowering::analyzeInputArgs(
MachineFunction &MF, CCState &CCInfo,
const SmallVectorImpl<ISD::InputArg> &Ins, bool IsRet) const {
unsigned NumArgs = Ins.size();
FunctionType *FType = MF.getFunction().getFunctionType();
+ Optional<unsigned> FirstMaskArgument;
+ if (Subtarget.hasStdExtV())
+ preAssignMask(Ins, FirstMaskArgument, CCInfo);
+
for (unsigned i = 0; i != NumArgs; ++i) {
MVT ArgVT = Ins[i].VT;
ISD::ArgFlagsTy ArgFlags = Ins[i].Flags;
@@ -2274,7 +2359,8 @@ void RISCVTargetLowering::analyzeInputArgs(
RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
if (CC_RISCV(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
- ArgFlags, CCInfo, /*IsFixed=*/true, IsRet, ArgTy)) {
+ ArgFlags, CCInfo, /*IsFixed=*/true, IsRet, ArgTy, *this,
+ FirstMaskArgument)) {
LLVM_DEBUG(dbgs() << "InputArg #" << i << " has unhandled type "
<< EVT(ArgVT).getEVTString() << '\n');
llvm_unreachable(nullptr);
@@ -2288,6 +2374,10 @@ void RISCVTargetLowering::analyzeOutputArgs(
CallLoweringInfo *CLI) const {
unsigned NumArgs = Outs.size();
+ Optional<unsigned> FirstMaskArgument;
+ if (Subtarget.hasStdExtV())
+ preAssignMask(Outs, FirstMaskArgument, CCInfo);
+
for (unsigned i = 0; i != NumArgs; i++) {
MVT ArgVT = Outs[i].VT;
ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
@@ -2295,7 +2385,8 @@ void RISCVTargetLowering::analyzeOutputArgs(
RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
if (CC_RISCV(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
- ArgFlags, CCInfo, Outs[i].IsFixed, IsRet, OrigTy)) {
+ ArgFlags, CCInfo, Outs[i].IsFixed, IsRet, OrigTy, *this,
+ FirstMaskArgument)) {
LLVM_DEBUG(dbgs() << "OutputArg #" << i << " has unhandled type "
<< EVT(ArgVT).getEVTString() << "\n");
llvm_unreachable(nullptr);
@@ -2327,31 +2418,13 @@ static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDValue Val,
// The caller is responsible for loading the full value if the argument is
// passed with CCValAssign::Indirect.
static SDValue unpackFromRegLoc(SelectionDAG &DAG, SDValue Chain,
- const CCValAssign &VA, const SDLoc &DL) {
+ const CCValAssign &VA, const SDLoc &DL,
+ const RISCVTargetLowering &TLI) {
MachineFunction &MF = DAG.getMachineFunction();
MachineRegisterInfo &RegInfo = MF.getRegInfo();
EVT LocVT = VA.getLocVT();
SDValue Val;
- const TargetRegisterClass *RC;
-
- switch (LocVT.getSimpleVT().SimpleTy) {
- default:
- llvm_unreachable("Unexpected register type");
- case MVT::i32:
- case MVT::i64:
- RC = &RISCV::GPRRegClass;
- break;
- case MVT::f16:
- RC = &RISCV::FPR16RegClass;
- break;
- case MVT::f32:
- RC = &RISCV::FPR32RegClass;
- break;
- case MVT::f64:
- RC = &RISCV::FPR64RegClass;
- break;
- }
-
+ const TargetRegisterClass *RC = TLI.getRegClassFor(LocVT.getSimpleVT());
Register VReg = RegInfo.createVirtualRegister(RC);
RegInfo.addLiveIn(VA.getLocReg(), VReg);
Val = DAG.getCopyFromReg(Chain, DL, VReg, LocVT);
@@ -2623,7 +2696,7 @@ SDValue RISCVTargetLowering::LowerFormalArguments(
if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64)
ArgValue = unpackF64OnRV32DSoftABI(DAG, Chain, VA, DL);
else if (VA.isRegLoc())
- ArgValue = unpackFromRegLoc(DAG, Chain, VA, DL);
+ ArgValue = unpackFromRegLoc(DAG, Chain, VA, DL, *this);
else
ArgValue = unpackFromMemLoc(DAG, Chain, VA, DL);
@@ -3071,12 +3144,18 @@ bool RISCVTargetLowering::CanLowerReturn(
const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
SmallVector<CCValAssign, 16> RVLocs;
CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
+
+ Optional<unsigned> FirstMaskArgument;
+ if (Subtarget.hasStdExtV())
+ preAssignMask(Outs, FirstMaskArgument, CCInfo);
+
for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
MVT VT = Outs[i].VT;
ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
if (CC_RISCV(MF.getDataLayout(), ABI, i, VT, VT, CCValAssign::Full,
- ArgFlags, CCInfo, /*IsFixed=*/true, /*IsRet=*/true, nullptr))
+ ArgFlags, CCInfo, /*IsFixed=*/true, /*IsRet=*/true, nullptr,
+ *this, FirstMaskArgument))
return false;
}
return true;
@@ -3673,3 +3752,12 @@ RISCVTargetLowering::getRegisterByName(const char *RegName, LLT VT,
StringRef(RegName) + "\"."));
return Reg;
}
+
+namespace llvm {
+namespace RISCVVIntrinsicsTable {
+
+#define GET_RISCVVIntrinsicsTable_IMPL
+#include "RISCVGenSearchableTables.inc"
+
+} // namespace RISCVVIntrinsicsTable
+} // namespace llvm
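A sketch of how the interim vector calling convention above plays out at the
IR level (an illustration drawn from the comments in CC_RISCV and
preAssignMask, not from the added tests; the function name is hypothetical):
the first scalable i1 vector argument is pre-assigned to v0, other scalable
vector arguments are allocated from v16-v23 (or the corresponding m2/m4/m8
register groups), and if no vector register is left the argument is passed
indirectly through a GPR.

  declare <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8(
    <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>,
    <vscale x 1 x i1>, i32)

  define <vscale x 1 x i8> @example_cc(<vscale x 1 x i8> %merge,
                                       <vscale x 1 x i8> %a,
                                       <vscale x 1 x i8> %b,
                                       <vscale x 1 x i1> %mask,
                                       i32 %vl) nounwind {
    ; %mask is the first i1 vector argument, so it arrives in v0; the other
    ; vector arguments arrive in v16/v17/v18 under this interim convention.
    %r = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8(
      <vscale x 1 x i8> %merge, <vscale x 1 x i8> %a, <vscale x 1 x i8> %b,
      <vscale x 1 x i1> %mask, i32 %vl)
    ret <vscale x 1 x i8> %r
  }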
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
index 8f4505acb235..33db64308d6d 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -87,6 +87,8 @@ class RISCVTargetLowering : public TargetLowering {
explicit RISCVTargetLowering(const TargetMachine &TM,
const RISCVSubtarget &STI);
+ const RISCVSubtarget &getSubtarget() const { return Subtarget; }
+
bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I,
MachineFunction &MF,
unsigned Intrinsic) const override;
@@ -269,6 +271,20 @@ class RISCVTargetLowering : public TargetLowering {
const SmallVectorImpl<std::pair<llvm::Register, llvm::SDValue>> &Regs,
MachineFunction &MF) const;
};
+
+namespace RISCVVIntrinsicsTable {
+
+struct RISCVVIntrinsicInfo {
+ unsigned int IntrinsicID;
+ unsigned int ExtendedOperand;
+};
+
+using namespace RISCV;
+
+#define GET_RISCVVIntrinsicsTable_DECL
+#include "RISCVGenSearchableTables.inc"
+
+} // end namespace RISCVVIntrinsicsTable
}
#endif
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
index f79ab1ab1d7f..2a8623ac0b4e 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -98,20 +98,37 @@ void RISCVInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
return;
}
- // FPR->FPR copies
+ // FPR->FPR copies and VR->VR copies.
unsigned Opc;
+ bool IsScalableVector = false;
if (RISCV::FPR16RegClass.contains(DstReg, SrcReg))
Opc = RISCV::FSGNJ_H;
else if (RISCV::FPR32RegClass.contains(DstReg, SrcReg))
Opc = RISCV::FSGNJ_S;
else if (RISCV::FPR64RegClass.contains(DstReg, SrcReg))
Opc = RISCV::FSGNJ_D;
- else
+ else if (RISCV::VRRegClass.contains(DstReg, SrcReg)) {
+ Opc = RISCV::PseudoVMV1R_V;
+ IsScalableVector = true;
+ } else if (RISCV::VRM2RegClass.contains(DstReg, SrcReg)) {
+ Opc = RISCV::PseudoVMV2R_V;
+ IsScalableVector = true;
+ } else if (RISCV::VRM4RegClass.contains(DstReg, SrcReg)) {
+ Opc = RISCV::PseudoVMV4R_V;
+ IsScalableVector = true;
+ } else if (RISCV::VRM8RegClass.contains(DstReg, SrcReg)) {
+ Opc = RISCV::PseudoVMV8R_V;
+ IsScalableVector = true;
+ } else
llvm_unreachable("Impossible reg-to-reg copy");
- BuildMI(MBB, MBBI, DL, get(Opc), DstReg)
- .addReg(SrcReg, getKillRegState(KillSrc))
- .addReg(SrcReg, getKillRegState(KillSrc));
+ if (IsScalableVector)
+ BuildMI(MBB, MBBI, DL, get(Opc), DstReg)
+ .addReg(SrcReg, getKillRegState(KillSrc));
+ else
+ BuildMI(MBB, MBBI, DL, get(Opc), DstReg)
+ .addReg(SrcReg, getKillRegState(KillSrc))
+ .addReg(SrcReg, getKillRegState(KillSrc));
}
void RISCVInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index 32762fd2803e..468ed00880ab 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -14,6 +14,23 @@
///
//===----------------------------------------------------------------------===//
+// X0 has special meaning for vsetvl/vsetvli.
+// rd | rs1 | AVL value | Effect on vl
+//--------------------------------------------------------------
+// !X0 | X0 | VLMAX | Set vl to VLMAX
+// X0 | X0 | Value in vl | Keep current vl, just change vtype.
+def NoX0 : SDNodeXForm<undef,
+[{
+ auto *C = dyn_cast<ConstantSDNode>(N);
+ if (C && C->isNullValue()) {
+ SDLoc DL(N);
+ return SDValue(CurDAG->getMachineNode(RISCV::ADDI, DL, Subtarget->getXLenVT(),
+ CurDAG->getRegister(RISCV::X0, Subtarget->getXLenVT()),
+ CurDAG->getTargetConstant(0, DL, Subtarget->getXLenVT())), 0);
+ }
+ return SDValue(N, 0);
+}]>;
+
//===----------------------------------------------------------------------===//
// Utilities.
//===----------------------------------------------------------------------===//
@@ -50,6 +67,18 @@ def VLMax : OutPatFrag<(ops), (XLenVT X0)>;
// List of EEW.
defvar EEWList = [8, 16, 32, 64];
+// We only model FPR32 for V instructions in RISCVInstrInfoV.td.
+// FP16/FP32/FP64 registers alias each other, so converting FPR16 and FPR64
+// to FPR32 for V instructions is enough.
+class ToFPR32<ValueType type, DAGOperand operand, string name> {
+ dag ret = !cond(!eq(!cast<string>(operand), !cast<string>(FPR64)):
+ (EXTRACT_SUBREG !dag(type, [FPR64], [name]), sub_32),
+ !eq(!cast<string>(operand), !cast<string>(FPR16)):
+ (SUBREG_TO_REG (i16 -1), !dag(type, [FPR16], [name]), sub_16),
+ !eq(1, 1):
+ !dag(type, [operand], [name]));
+}
+
//===----------------------------------------------------------------------===//
// Vector register and vector group type information.
//===----------------------------------------------------------------------===//
@@ -103,13 +132,20 @@ defset list<VTypeInfo> AllVectors = {
// This class holds the record of the RISCVVPseudoTable below.
// This represents the information we need in codegen for each pseudo.
+// The definition should be consistent with `struct PseudoInfo` in
+// RISCVBaseInfo.h.
+class CONST8b<bits<8> val> {
+ bits<8> V = val;
+}
+def InvalidIndex : CONST8b<0x80>;
class RISCVVPseudo {
Pseudo Pseudo = !cast<Pseudo>(NAME); // Used as a key.
Instruction BaseInstr;
- bits<8> VLIndex;
- bits<8> SEWIndex;
- bits<8> MergeOpIndex;
+ bits<8> VLIndex = InvalidIndex.V;
+ bits<8> SEWIndex = InvalidIndex.V;
+ bits<8> MergeOpIndex = InvalidIndex.V;
bits<3> VLMul;
+ bit HasDummyMask = 0;
}
// The actual table.
@@ -117,11 +153,19 @@ def RISCVVPseudosTable : GenericTable {
let FilterClass = "RISCVVPseudo";
let CppTypeName = "PseudoInfo";
let Fields = [ "Pseudo", "BaseInstr", "VLIndex", "SEWIndex", "MergeOpIndex",
- "VLMul" ];
+ "VLMul", "HasDummyMask" ];
let PrimaryKey = [ "Pseudo" ];
let PrimaryKeyName = "getPseudoInfo";
}
+def RISCVVIntrinsicsTable : GenericTable {
+ let FilterClass = "RISCVVIntrinsic";
+ let CppTypeName = "RISCVVIntrinsicInfo";
+ let Fields = ["IntrinsicID", "ExtendOperand"];
+ let PrimaryKey = ["IntrinsicID"];
+ let PrimaryKeyName = "getRISCVVIntrinsicInfo";
+}
+
//===----------------------------------------------------------------------===//
// Helpers to define the different pseudo instructions.
//===----------------------------------------------------------------------===//
@@ -138,27 +182,53 @@ class PseudoToVInst<string PseudoInst> {
!subst("Pseudo", "", PseudoInst)))))))));
}
-class VPseudoBinary<VReg RetClass,
- VReg Op1Class,
- DAGOperand Op2Class> :
+// The destination vector register group for a masked vector instruction cannot
+// overlap the source mask register (v0), unless the destination vector register
+// is being written with a mask value (e.g., comparisons) or the scalar result
+// of a reduction.
+class GetVRegNoV0<VReg VRegClass> {
+ VReg R = !cond(!eq(VRegClass, VR) : VRNoV0,
+ !eq(VRegClass, VRM2) : VRM2NoV0,
+ !eq(VRegClass, VRM4) : VRM4NoV0,
+ !eq(VRegClass, VRM8) : VRM8NoV0,
+ !eq(1, 1) : VRegClass);
+}
+
+class VPseudo<Instruction instr, LMULInfo m, dag outs, dag ins> :
+ Pseudo<outs, ins, []>, RISCVVPseudo {
+ let BaseInstr = instr;
+ let VLMul = m.value;
+}
+
+class VPseudoBinaryNoMask<VReg RetClass,
+ VReg Op1Class,
+ DAGOperand Op2Class> :
Pseudo<(outs RetClass:$rd),
(ins Op1Class:$rs2, Op2Class:$rs1, GPR:$vl, ixlenimm:$sew), []>,
RISCVVPseudo {
+ let mayLoad = 0;
+ let mayStore = 0;
+ let hasSideEffects = 0;
+ let usesCustomInserter = 1;
let Uses = [VL, VTYPE];
let VLIndex = 3;
let SEWIndex = 4;
- let MergeOpIndex = -1;
+ let HasDummyMask = 1;
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
class VPseudoBinaryMask<VReg RetClass,
VReg Op1Class,
DAGOperand Op2Class> :
- Pseudo<(outs RetClass:$rd),
- (ins RetClass:$merge,
+ Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
+ (ins GetVRegNoV0<RetClass>.R:$merge,
Op1Class:$rs2, Op2Class:$rs1,
VMaskOp:$vm, GPR:$vl, ixlenimm:$sew), []>,
RISCVVPseudo {
+ let mayLoad = 0;
+ let mayStore = 0;
+ let hasSideEffects = 0;
+ let usesCustomInserter = 1;
let Constraints = "$rd = $merge";
let Uses = [VL, VTYPE];
let VLIndex = 5;
@@ -171,75 +241,181 @@ multiclass VPseudoBinary<VReg RetClass,
VReg Op1Class,
DAGOperand Op2Class,
LMULInfo MInfo> {
- def "_" # MInfo.MX : VPseudoBinary<RetClass, Op1Class, Op2Class>;
- def "_" # MInfo.MX # "_MASK" : VPseudoBinaryMask<RetClass, Op1Class, Op2Class>;
+ let VLMul = MInfo.value in {
+ def "_" # MInfo.MX : VPseudoBinaryNoMask<RetClass, Op1Class, Op2Class>;
+ def "_" # MInfo.MX # "_MASK" : VPseudoBinaryMask<RetClass, Op1Class, Op2Class>;
+ }
}
multiclass VPseudoBinaryV_VV {
- let mayLoad = 0, mayStore = 0, hasSideEffects = 0, usesCustomInserter = 1 in
foreach m = MxList.m in
- {
- let VLMul = m.value in
defm _VV : VPseudoBinary<m.vrclass, m.vrclass, m.vrclass, m>;
- }
}
multiclass VPseudoBinaryV_VX {
- let mayLoad = 0, mayStore = 0, hasSideEffects = 0, usesCustomInserter = 1 in
foreach m = MxList.m in
- {
- let VLMul = m.value in
defm _VX : VPseudoBinary<m.vrclass, m.vrclass, GPR, m>;
- }
}
multiclass VPseudoBinaryV_VI<Operand ImmType = simm5> {
- let mayLoad = 0, mayStore = 0, hasSideEffects = 0, usesCustomInserter = 1 in
foreach m = MxList.m in
- {
- let VLMul = m.value in
defm _VI : VPseudoBinary<m.vrclass, m.vrclass, ImmType, m>;
- }
}
-multiclass VPseudoBinary_VV_VX_VI<Operand ImmType = simm5> {
+multiclass VPseudoBinaryV_VV_VX_VI<Operand ImmType = simm5> {
defm "" : VPseudoBinaryV_VV;
defm "" : VPseudoBinaryV_VX;
defm "" : VPseudoBinaryV_VI<ImmType>;
}
+multiclass VPseudoBinaryV_VV_VX {
+ defm "" : VPseudoBinaryV_VV;
+ defm "" : VPseudoBinaryV_VX;
+}
+
+multiclass VPseudoBinaryV_VX_VI<Operand ImmType = simm5> {
+ defm "" : VPseudoBinaryV_VX;
+ defm "" : VPseudoBinaryV_VI<ImmType>;
+}
+
//===----------------------------------------------------------------------===//
// Helpers to define the different patterns.
//===----------------------------------------------------------------------===//
-
-multiclass pat_vop_binary<SDNode vop,
- string instruction_name,
- ValueType result_type,
- ValueType op_type,
- ValueType mask_type,
- int sew,
- LMULInfo vlmul,
- VReg RetClass,
- VReg op_reg_class,
- bit swap = 0>
+class VPatBinarySDNode<SDNode vop,
+ string instruction_name,
+ ValueType result_type,
+ ValueType op_type,
+ ValueType mask_type,
+ int sew,
+ LMULInfo vlmul,
+ VReg RetClass,
+ VReg op_reg_class> :
+ Pat<(result_type (vop
+ (op_type op_reg_class:$rs1),
+ (op_type op_reg_class:$rs2))),
+ (!cast<Instruction>(instruction_name#"_VV_"# vlmul.MX)
+ op_reg_class:$rs1,
+ op_reg_class:$rs2,
+ VLMax, sew)>;
+
+multiclass VPatBinarySDNode<SDNode vop, string instruction_name>
{
- defvar instruction = !cast<Instruction>(instruction_name#"_VV_"# vlmul.MX);
- def : Pat<(result_type (vop
- (op_type op_reg_class:$rs1),
- (op_type op_reg_class:$rs2))),
- (instruction op_reg_class:$rs1,
- op_reg_class:$rs2,
- VLMax, sew)>;
-}
-
-multiclass pat_vop_binary_common<SDNode vop,
- string instruction_name,
- list<VTypeInfo> vtilist>
+ foreach vti = AllIntegerVectors in
+ def : VPatBinarySDNode<vop, instruction_name,
+ vti.Vector, vti.Vector, vti.Mask, vti.SEW,
+ vti.LMul, vti.RegClass, vti.RegClass>;
+}
+
+class VPatBinaryNoMask<string intrinsic_name,
+ string inst,
+ string kind,
+ ValueType result_type,
+ ValueType op1_type,
+ ValueType op2_type,
+ int sew,
+ LMULInfo vlmul,
+ VReg op1_reg_class,
+ DAGOperand op2_kind> :
+ Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
+ (op1_type op1_reg_class:$rs1),
+ (op2_type op2_kind:$rs2),
+ (XLenVT GPR:$vl))),
+ (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
+ (op1_type op1_reg_class:$rs1),
+ ToFPR32<op2_type, op2_kind, "rs2">.ret,
+ (NoX0 GPR:$vl), sew)>;
+
+class VPatBinaryMask<string intrinsic_name,
+ string inst,
+ string kind,
+ ValueType result_type,
+ ValueType op1_type,
+ ValueType op2_type,
+ ValueType mask_type,
+ int sew,
+ LMULInfo vlmul,
+ VReg result_reg_class,
+ VReg op1_reg_class,
+ DAGOperand op2_kind> :
+ Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
+ (result_type result_reg_class:$merge),
+ (op1_type op1_reg_class:$rs1),
+ (op2_type op2_kind:$rs2),
+ (mask_type V0),
+ (XLenVT GPR:$vl))),
+ (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX#"_MASK")
+ (result_type result_reg_class:$merge),
+ (op1_type op1_reg_class:$rs1),
+ ToFPR32<op2_type, op2_kind, "rs2">.ret,
+ (mask_type V0), (NoX0 GPR:$vl), sew)>;
+
+multiclass VPatBinary<string intrinsic,
+ string inst,
+ string kind,
+ ValueType result_type,
+ ValueType op1_type,
+ ValueType op2_type,
+ ValueType mask_type,
+ int sew,
+ LMULInfo vlmul,
+ VReg result_reg_class,
+ VReg op1_reg_class,
+ DAGOperand op2_kind>
{
+ def : VPatBinaryNoMask<intrinsic, inst, kind, result_type, op1_type, op2_type,
+ sew, vlmul, op1_reg_class, op2_kind>;
+ def : VPatBinaryMask<intrinsic, inst, kind, result_type, op1_type, op2_type,
+ mask_type, sew, vlmul, result_reg_class, op1_reg_class,
+ op2_kind>;
+}
+
+multiclass VPatBinaryV_VV<string intrinsic, string instruction,
+ list<VTypeInfo> vtilist> {
+ foreach vti = vtilist in
+ defm : VPatBinary<intrinsic, instruction, "VV",
+ vti.Vector, vti.Vector, vti.Vector, vti.Mask,
+ vti.SEW, vti.LMul, vti.RegClass,
+ vti.RegClass, vti.RegClass>;
+}
+
+multiclass VPatBinaryV_VX<string intrinsic, string instruction,
+ list<VTypeInfo> vtilist> {
+ foreach vti = vtilist in
+ defm : VPatBinary<intrinsic, instruction, "VX",
+ vti.Vector, vti.Vector, XLenVT, vti.Mask,
+ vti.SEW, vti.LMul, vti.RegClass,
+ vti.RegClass, GPR>;
+}
+
+multiclass VPatBinaryV_VI<string intrinsic, string instruction,
+ list<VTypeInfo> vtilist, Operand imm_type> {
foreach vti = vtilist in
- defm : pat_vop_binary<vop, instruction_name,
- vti.Vector, vti.Vector, vti.Mask, vti.SEW,
- vti.LMul, vti.RegClass, vti.RegClass>;
+ defm : VPatBinary<intrinsic, instruction, "VI",
+ vti.Vector, vti.Vector, XLenVT, vti.Mask,
+ vti.SEW, vti.LMul, vti.RegClass,
+ vti.RegClass, imm_type>;
+}
+
+multiclass VPatBinaryV_VV_VX_VI<string intrinsic, string instruction,
+ list<VTypeInfo> vtilist>
+{
+ defm "" : VPatBinaryV_VV<intrinsic, instruction, vtilist>;
+ defm "" : VPatBinaryV_VX<intrinsic, instruction, vtilist>;
+ defm "" : VPatBinaryV_VI<intrinsic, instruction, vtilist, simm5>;
+}
+
+multiclass VPatBinaryV_VV_VX<string intrinsic, string instruction,
+ list<VTypeInfo> vtilist>
+{
+ defm "" : VPatBinaryV_VV<intrinsic, instruction, vtilist>;
+ defm "" : VPatBinaryV_VX<intrinsic, instruction, vtilist>;
+}
+
+multiclass VPatBinaryV_VX_VI<string intrinsic, string instruction,
+ list<VTypeInfo> vtilist>
+{
+ defm "" : VPatBinaryV_VX<intrinsic, instruction, vtilist>;
+ defm "" : VPatBinaryV_VI<intrinsic, instruction, vtilist, simm5>;
}
//===----------------------------------------------------------------------===//
@@ -248,6 +424,16 @@ multiclass pat_vop_binary_common<SDNode vop,
let Predicates = [HasStdExtV] in {
+//===----------------------------------------------------------------------===//
+// Pseudo Instructions for CodeGen
+//===----------------------------------------------------------------------===//
+let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in {
+ def PseudoVMV1R_V : VPseudo<VMV1R_V, V_M1, (outs VR:$vd), (ins VR:$vs2)>;
+ def PseudoVMV2R_V : VPseudo<VMV2R_V, V_M2, (outs VRM2:$vd), (ins VRM2:$vs2)>;
+ def PseudoVMV4R_V : VPseudo<VMV4R_V, V_M4, (outs VRM4:$vd), (ins VRM4:$vs2)>;
+ def PseudoVMV8R_V : VPseudo<VMV8R_V, V_M8, (outs VRM8:$vd), (ins VRM8:$vs2)>;
+}
+
//===----------------------------------------------------------------------===//
// 6. Configuration-Setting Instructions
//===----------------------------------------------------------------------===//
@@ -341,6 +527,10 @@ foreach vti = AllVectors in
vti.SEW, vti.LMul, vti.RegClass>;
}
+//===----------------------------------------------------------------------===//
+// Pseudo Instructions
+//===----------------------------------------------------------------------===//
+
//===----------------------------------------------------------------------===//
// 12. Vector Integer Arithmetic Instructions
//===----------------------------------------------------------------------===//
@@ -348,11 +538,26 @@ foreach vti = AllVectors in
//===----------------------------------------------------------------------===//
// 12.1. Vector Single-Width Integer Add and Subtract
//===----------------------------------------------------------------------===//
+defm PseudoVADD : VPseudoBinaryV_VV_VX_VI;
+defm PseudoVSUB : VPseudoBinaryV_VV_VX;
+defm PseudoVRSUB : VPseudoBinaryV_VX_VI;
-// Pseudo instructions.
-defm PseudoVADD : VPseudoBinary_VV_VX_VI;
+//===----------------------------------------------------------------------===//
+// Patterns.
+//===----------------------------------------------------------------------===//
// Whole-register vector patterns.
-defm "" : pat_vop_binary_common<add, "PseudoVADD", AllIntegerVectors>;
+defm "" : VPatBinarySDNode<add, "PseudoVADD">;
+
+//===----------------------------------------------------------------------===//
+// 12. Vector Integer Arithmetic Instructions
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// 12.1. Vector Single-Width Integer Add and Subtract
+//===----------------------------------------------------------------------===//
+defm "" : VPatBinaryV_VV_VX_VI<"int_riscv_vadd", "PseudoVADD", AllIntegerVectors>;
+defm "" : VPatBinaryV_VV_VX<"int_riscv_vsub", "PseudoVSUB", AllIntegerVectors>;
+defm "" : VPatBinaryV_VX_VI<"int_riscv_vrsub", "PseudoVRSUB", AllIntegerVectors>;
} // Predicates = [HasStdExtV]
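Note that vrsub has no vector-vector form, which is why it uses
VPseudoBinaryV_VX_VI / VPatBinaryV_VX_VI above while vsub uses the VV/VX
variants. A minimal sketch of the scalar form, assuming vrsub follows the same
intrinsic naming scheme shown for vadd (the exact declarations live in the new
vrsub-rv32.ll test, which is not reproduced in this mail):

  declare <vscale x 1 x i8> @llvm.riscv.vrsub.nxv1i8.i8(
    <vscale x 1 x i8>, i8, i32)

  define <vscale x 1 x i8> @example_vrsub_vx(<vscale x 1 x i8> %v, i8 %s,
                                             i32 %vl) nounwind {
    ; Expected to select a PseudoVRSUB VX pseudo and emit vsetvli followed by
    ; vrsub.vx, which computes vd[i] = rs1 - vs2[i] (reverse subtract).
    %r = call <vscale x 1 x i8> @llvm.riscv.vrsub.nxv1i8.i8(
      <vscale x 1 x i8> %v, i8 %s, i32 %vl)
    ret <vscale x 1 x i8> %r
  }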
diff --git a/llvm/lib/Target/RISCV/RISCVMCInstLower.cpp b/llvm/lib/Target/RISCV/RISCVMCInstLower.cpp
index b1aacfe878b9..4fd060a65014 100644
--- a/llvm/lib/Target/RISCV/RISCVMCInstLower.cpp
+++ b/llvm/lib/Target/RISCV/RISCVMCInstLower.cpp
@@ -177,9 +177,9 @@ static bool lowerRISCVVMachineInstrToMCInst(const MachineInstr *MI,
OutMI.addOperand(MCOp);
}
- // Unmasked pseudo instructions define MergeOpIndex to -1.
- // Append dummy mask operand to V instructions.
- if (RVV->getMergeOpIndex() == -1)
+ // Unmasked pseudo instructions need to append a dummy mask operand to
+ // V instructions. All V instructions are modeled as the masked version.
+ if (RVV->hasDummyMask())
OutMI.addOperand(MCOperand::createReg(RISCV::NoRegister));
return true;
diff --git a/llvm/lib/Target/RISCV/RISCVRegisterInfo.td b/llvm/lib/Target/RISCV/RISCVRegisterInfo.td
index 9bf4453097c8..ea7357c9c073 100644
--- a/llvm/lib/Target/RISCV/RISCVRegisterInfo.td
+++ b/llvm/lib/Target/RISCV/RISCVRegisterInfo.td
@@ -413,14 +413,27 @@ def VRM2 : VReg<[vint8m2_t, vint16m2_t, vint32m2_t, vint64m2_t,
(add V26M2, V28M2, V30M2, V8M2, V10M2, V12M2, V14M2, V16M2,
V18M2, V20M2, V22M2, V24M2, V0M2, V2M2, V4M2, V6M2), 2>;
+def VRM2NoV0 : VReg<[vint8m2_t, vint16m2_t, vint32m2_t, vint64m2_t,
+ vfloat32m2_t, vfloat64m2_t],
+ (add V26M2, V28M2, V30M2, V8M2, V10M2, V12M2, V14M2, V16M2,
+ V18M2, V20M2, V22M2, V24M2, V2M2, V4M2, V6M2), 2>;
+
def VRM4 : VReg<[vint8m4_t, vint16m4_t, vint32m4_t, vint64m4_t,
vfloat32m4_t, vfloat64m4_t],
(add V28M4, V8M4, V12M4, V16M4, V20M4, V24M4, V0M4, V4M4), 4>;
+def VRM4NoV0 : VReg<[vint8m4_t, vint16m4_t, vint32m4_t, vint64m4_t,
+ vfloat32m4_t, vfloat64m4_t],
+ (add V28M4, V8M4, V12M4, V16M4, V20M4, V24M4, V4M4), 4>;
+
def VRM8 : VReg<[vint8m8_t, vint16m8_t, vint32m8_t, vint64m8_t,
vfloat32m8_t, vfloat64m8_t],
(add V8M8, V16M8, V24M8, V0M8), 8>;
+def VRM8NoV0 : VReg<[vint8m8_t, vint16m8_t, vint32m8_t, vint64m8_t,
+ vfloat32m8_t, vfloat64m8_t],
+ (add V8M8, V16M8, V24M8), 8>;
+
defvar VMaskVTs = [vbool64_t, vbool32_t, vbool16_t, vbool8_t,
vbool4_t, vbool2_t, vbool1_t];
diff --git a/llvm/lib/Target/RISCV/Utils/RISCVBaseInfo.h b/llvm/lib/Target/RISCV/Utils/RISCVBaseInfo.h
index 4451707890b8..15c63750a1b9 100644
--- a/llvm/lib/Target/RISCV/Utils/RISCVBaseInfo.h
+++ b/llvm/lib/Target/RISCV/Utils/RISCVBaseInfo.h
@@ -409,6 +409,10 @@ void printVType(unsigned VType, raw_ostream &OS);
namespace RISCVVPseudosTable {
+// The definition should be consistent with `class RISCVVPseudo` in
+// RISCVInstrInfoVPseudos.td.
+static const uint8_t InvalidIndex = 0x80;
+
struct PseudoInfo {
unsigned int Pseudo;
unsigned int BaseInstr;
@@ -416,12 +420,15 @@ struct PseudoInfo {
uint8_t SEWIndex;
uint8_t MergeOpIndex;
uint8_t VLMul;
+ bool HasDummyMask;
int getVLIndex() const { return static_cast<int8_t>(VLIndex); }
int getSEWIndex() const { return static_cast<int8_t>(SEWIndex); }
int getMergeOpIndex() const { return static_cast<int8_t>(MergeOpIndex); }
+
+ bool hasDummyMask() const { return HasDummyMask; }
};
using namespace RISCV;
diff --git a/llvm/test/CodeGen/RISCV/rvv/vadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vadd-rv32.ll
new file mode 100644
index 000000000000..59a5f9f684d3
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vadd-rv32.ll
@@ -0,0 +1,1945 @@
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ i32);
+
+define <vscale x 1 x i8> @intrinsic_vadd_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vv_nxv1i8_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i8> @intrinsic_vadd_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i8_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>,
+ i32);
+
+define <vscale x 2 x i8> @intrinsic_vadd_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vv_nxv2i8_nxv2i8_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vadd.mask.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i8> @intrinsic_vadd_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv2i8_nxv2i8_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i8> @llvm.riscv.vadd.mask.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8> %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>,
+ i32);
+
+define <vscale x 4 x i8> @intrinsic_vadd_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vv_nxv4i8_nxv4i8_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vadd.mask.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i8> @intrinsic_vadd_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv4i8_nxv4i8_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i8> @llvm.riscv.vadd.mask.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8> %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vadd_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vv_nxv8i8_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vadd_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv8i8_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>,
+ i32);
+
+define <vscale x 16 x i8> @intrinsic_vadd_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vv_nxv16i8_nxv16i8_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vadd.mask.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i8> @intrinsic_vadd_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv16i8_nxv16i8_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i8> @llvm.riscv.vadd.mask.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8> %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>,
+ i32);
+
+define <vscale x 32 x i8> @intrinsic_vadd_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vv_nxv32i8_nxv32i8_nxv32i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vadd.mask.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i1>,
+ i32);
+
+define <vscale x 32 x i8> @intrinsic_vadd_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv32i8_nxv32i8_nxv32i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 32 x i8> @llvm.riscv.vadd.mask.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8> %1,
+ <vscale x 32 x i8> %2,
+ <vscale x 32 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
+ <vscale x 64 x i8>,
+ i32);
+
+define <vscale x 64 x i8> @intrinsic_vadd_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vv_nxv64i8_nxv64i8_nxv64i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> %0,
+ <vscale x 64 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
+ <vscale x 64 x i8>,
+ <vscale x 64 x i8>,
+ <vscale x 64 x i1>,
+ i32);
+
+define <vscale x 64 x i8> @intrinsic_vadd_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv64i8_nxv64i8_nxv64i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> %0,
+ <vscale x 64 x i8> %1,
+ <vscale x 64 x i8> %2,
+ <vscale x 64 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>,
+ i32);
+
+define <vscale x 1 x i16> @intrinsic_vadd_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vv_nxv1i16_nxv1i16_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vadd.mask.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i16> @intrinsic_vadd_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i16_nxv1i16_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i16> @llvm.riscv.vadd.mask.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16> %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>,
+ i32);
+
+define <vscale x 2 x i16> @intrinsic_vadd_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vv_nxv2i16_nxv2i16_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vadd.mask.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i16> @intrinsic_vadd_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv2i16_nxv2i16_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i16> @llvm.riscv.vadd.mask.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16> %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vadd_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vv_nxv4i16_nxv4i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vadd.mask.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vadd_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv4i16_nxv4i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vadd.mask.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>,
+ i32);
+
+define <vscale x 8 x i16> @intrinsic_vadd_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vv_nxv8i16_nxv8i16_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vadd.mask.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i16> @intrinsic_vadd_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv8i16_nxv8i16_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i16> @llvm.riscv.vadd.mask.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16> %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>,
+ i32);
+
+define <vscale x 16 x i16> @intrinsic_vadd_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vv_nxv16i16_nxv16i16_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i16> @intrinsic_vadd_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv16i16_nxv16i16_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16> %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>,
+ i32);
+
+define <vscale x 32 x i16> @intrinsic_vadd_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vv_nxv32i16_nxv32i16_nxv32i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vadd.mask.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>,
+ <vscale x 32 x i1>,
+ i32);
+
+define <vscale x 32 x i16> @intrinsic_vadd_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv32i16_nxv32i16_nxv32i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 32 x i16> @llvm.riscv.vadd.mask.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i16> %1,
+ <vscale x 32 x i16> %2,
+ <vscale x 32 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>,
+ i32);
+
+define <vscale x 1 x i32> @intrinsic_vadd_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vv_nxv1i32_nxv1i32_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i32> @intrinsic_vadd_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i32_nxv1i32_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32> %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vadd_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vv_nxv2i32_nxv2i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vadd_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv2i32_nxv2i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>,
+ i32);
+
+define <vscale x 4 x i32> @intrinsic_vadd_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vv_nxv4i32_nxv4i32_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i32> @intrinsic_vadd_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv4i32_nxv4i32_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32> %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>,
+ i32);
+
+define <vscale x 8 x i32> @intrinsic_vadd_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vv_nxv8i32_nxv8i32_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vadd.mask.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i32> @intrinsic_vadd_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv8i32_nxv8i32_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i32> @llvm.riscv.vadd.mask.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32> %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>,
+ i32);
+
+define <vscale x 16 x i32> @intrinsic_vadd_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vv_nxv16i32_nxv16i32_nxv16i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i32> @intrinsic_vadd_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv16i32_nxv16i32_nxv16i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32> %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.i8(
+ <vscale x 1 x i8>,
+ i8,
+ i32);
+
+define <vscale x 1 x i8> @intrinsic_vadd_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vx_nxv1i8_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.i8(
+ <vscale x 1 x i8> %0,
+ i8 %1,
+ i32 %2)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ i8,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i8> @intrinsic_vadd_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv1i8_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ i8 %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.i8(
+ <vscale x 2 x i8>,
+ i8,
+ i32);
+
+define <vscale x 2 x i8> @intrinsic_vadd_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vx_nxv2i8_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.i8(
+ <vscale x 2 x i8> %0,
+ i8 %1,
+ i32 %2)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vadd.mask.nxv2i8.i8(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>,
+ i8,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i8> @intrinsic_vadd_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv2i8_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i8> @llvm.riscv.vadd.mask.nxv2i8.i8(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8> %1,
+ i8 %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.i8(
+ <vscale x 4 x i8>,
+ i8,
+ i32);
+
+define <vscale x 4 x i8> @intrinsic_vadd_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vx_nxv4i8_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.i8(
+ <vscale x 4 x i8> %0,
+ i8 %1,
+ i32 %2)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vadd.mask.nxv4i8.i8(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>,
+ i8,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i8> @intrinsic_vadd_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv4i8_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i8> @llvm.riscv.vadd.mask.nxv4i8.i8(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8> %1,
+ i8 %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.i8(
+ <vscale x 8 x i8>,
+ i8,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vadd_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vx_nxv8i8_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.i8(
+ <vscale x 8 x i8> %0,
+ i8 %1,
+ i32 %2)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.i8(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ i8,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vadd_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv8i8_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8> %1,
+ i8 %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.i8(
+ <vscale x 16 x i8>,
+ i8,
+ i32);
+
+define <vscale x 16 x i8> @intrinsic_vadd_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vx_nxv16i8_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.i8(
+ <vscale x 16 x i8> %0,
+ i8 %1,
+ i32 %2)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vadd.mask.nxv16i8.i8(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>,
+ i8,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i8> @intrinsic_vadd_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv16i8_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i8> @llvm.riscv.vadd.mask.nxv16i8.i8(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8> %1,
+ i8 %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.i8(
+ <vscale x 32 x i8>,
+ i8,
+ i32);
+
+define <vscale x 32 x i8> @intrinsic_vadd_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vx_nxv32i8_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.i8(
+ <vscale x 32 x i8> %0,
+ i8 %1,
+ i32 %2)
+
+ ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vadd.mask.nxv32i8.i8(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>,
+ i8,
+ <vscale x 32 x i1>,
+ i32);
+
+define <vscale x 32 x i8> @intrinsic_vadd_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv32i8_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 32 x i8> @llvm.riscv.vadd.mask.nxv32i8.i8(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8> %1,
+ i8 %2,
+ <vscale x 32 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.i8(
+ <vscale x 64 x i8>,
+ i8,
+ i32);
+
+define <vscale x 64 x i8> @intrinsic_vadd_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vx_nxv64i8_nxv64i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.i8(
+ <vscale x 64 x i8> %0,
+ i8 %1,
+ i32 %2)
+
+ ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.i8(
+ <vscale x 64 x i8>,
+ <vscale x 64 x i8>,
+ i8,
+ <vscale x 64 x i1>,
+ i32);
+
+define <vscale x 64 x i8> @intrinsic_vadd_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv64i8_nxv64i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.i8(
+ <vscale x 64 x i8> %0,
+ <vscale x 64 x i8> %1,
+ i8 %2,
+ <vscale x 64 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.i16(
+ <vscale x 1 x i16>,
+ i16,
+ i32);
+
+define <vscale x 1 x i16> @intrinsic_vadd_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vx_nxv1i16_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.i16(
+ <vscale x 1 x i16> %0,
+ i16 %1,
+ i32 %2)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vadd.mask.nxv1i16.i16(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>,
+ i16,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i16> @intrinsic_vadd_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv1i16_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i16> @llvm.riscv.vadd.mask.nxv1i16.i16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16> %1,
+ i16 %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.i16(
+ <vscale x 2 x i16>,
+ i16,
+ i32);
+
+define <vscale x 2 x i16> @intrinsic_vadd_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vx_nxv2i16_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.i16(
+ <vscale x 2 x i16> %0,
+ i16 %1,
+ i32 %2)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vadd.mask.nxv2i16.i16(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>,
+ i16,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i16> @intrinsic_vadd_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv2i16_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i16> @llvm.riscv.vadd.mask.nxv2i16.i16(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16> %1,
+ i16 %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.i16(
+ <vscale x 4 x i16>,
+ i16,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vadd_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vx_nxv4i16_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.i16(
+ <vscale x 4 x i16> %0,
+ i16 %1,
+ i32 %2)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vadd.mask.nxv4i16.i16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ i16,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vadd_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv4i16_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vadd.mask.nxv4i16.i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16> %1,
+ i16 %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.i16(
+ <vscale x 8 x i16>,
+ i16,
+ i32);
+
+define <vscale x 8 x i16> @intrinsic_vadd_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vx_nxv8i16_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.i16(
+ <vscale x 8 x i16> %0,
+ i16 %1,
+ i32 %2)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vadd.mask.nxv8i16.i16(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>,
+ i16,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i16> @intrinsic_vadd_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv8i16_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i16> @llvm.riscv.vadd.mask.nxv8i16.i16(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16> %1,
+ i16 %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.i16(
+ <vscale x 16 x i16>,
+ i16,
+ i32);
+
+define <vscale x 16 x i16> @intrinsic_vadd_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vx_nxv16i16_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.i16(
+ <vscale x 16 x i16> %0,
+ i16 %1,
+ i32 %2)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.i16(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>,
+ i16,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i16> @intrinsic_vadd_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv16i16_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.i16(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16> %1,
+ i16 %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.i16(
+ <vscale x 32 x i16>,
+ i16,
+ i32);
+
+define <vscale x 32 x i16> @intrinsic_vadd_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vx_nxv32i16_nxv32i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.i16(
+ <vscale x 32 x i16> %0,
+ i16 %1,
+ i32 %2)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vadd.mask.nxv32i16.i16(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>,
+ i16,
+ <vscale x 32 x i1>,
+ i32);
+
+define <vscale x 32 x i16> @intrinsic_vadd_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv32i16_nxv32i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 32 x i16> @llvm.riscv.vadd.mask.nxv32i16.i16(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i16> %1,
+ i16 %2,
+ <vscale x 32 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.i32(
+ <vscale x 1 x i32>,
+ i32,
+ i32);
+
+define <vscale x 1 x i32> @intrinsic_vadd_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vx_nxv1i32_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.i32(
+ <vscale x 1 x i32> %0,
+ i32 %1,
+ i32 %2)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.i32(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>,
+ i32,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i32> @intrinsic_vadd_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv1i32_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32> %1,
+ i32 %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.i32(
+ <vscale x 2 x i32>,
+ i32,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vadd_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vx_nxv2i32_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.i32(
+ <vscale x 2 x i32> %0,
+ i32 %1,
+ i32 %2)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.i32(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ i32,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vadd_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv2i32_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32> %1,
+ i32 %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.i32(
+ <vscale x 4 x i32>,
+ i32,
+ i32);
+
+define <vscale x 4 x i32> @intrinsic_vadd_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vx_nxv4i32_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.i32(
+ <vscale x 4 x i32> %0,
+ i32 %1,
+ i32 %2)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.i32(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>,
+ i32,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i32> @intrinsic_vadd_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv4i32_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32> %1,
+ i32 %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.i32(
+ <vscale x 8 x i32>,
+ i32,
+ i32);
+
+define <vscale x 8 x i32> @intrinsic_vadd_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vx_nxv8i32_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.i32(
+ <vscale x 8 x i32> %0,
+ i32 %1,
+ i32 %2)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vadd.mask.nxv8i32.i32(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>,
+ i32,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i32> @intrinsic_vadd_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv8i32_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i32> @llvm.riscv.vadd.mask.nxv8i32.i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32> %1,
+ i32 %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.i32(
+ <vscale x 16 x i32>,
+ i32,
+ i32);
+
+define <vscale x 16 x i32> @intrinsic_vadd_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vx_nxv16i32_nxv16i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.i32(
+ <vscale x 16 x i32> %0,
+ i32 %1,
+ i32 %2)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.i32(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>,
+ i32,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i32> @intrinsic_vadd_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv16i32_nxv16i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.i32(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32> %1,
+ i32 %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i32> %a
+}
+
+define <vscale x 1 x i8> @intrinsic_vadd_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vi_nxv1i8_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.i8(
+ <vscale x 1 x i8> %0,
+ i8 9,
+ i32 %1)
+
+ ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 1 x i8> @intrinsic_vadd_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv1i8_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ i8 9,
+ <vscale x 1 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_vadd_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vi_nxv2i8_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.i8(
+ <vscale x 2 x i8> %0,
+ i8 9,
+ i32 %1)
+
+ ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_vadd_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv2i8_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 2 x i8> @llvm.riscv.vadd.mask.nxv2i8.i8(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8> %1,
+ i8 9,
+ <vscale x 2 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_vadd_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vi_nxv4i8_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.i8(
+ <vscale x 4 x i8> %0,
+ i8 9,
+ i32 %1)
+
+ ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_vadd_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv4i8_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 4 x i8> @llvm.riscv.vadd.mask.nxv4i8.i8(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8> %1,
+ i8 9,
+ <vscale x 4 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vadd_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vi_nxv8i8_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.i8(
+ <vscale x 8 x i8> %0,
+ i8 9,
+ i32 %1)
+
+ ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vadd_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv8i8_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8> %1,
+ i8 9,
+ <vscale x 8 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_vadd_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vi_nxv16i8_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.i8(
+ <vscale x 16 x i8> %0,
+ i8 9,
+ i32 %1)
+
+ ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_vadd_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv16i8_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 16 x i8> @llvm.riscv.vadd.mask.nxv16i8.i8(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8> %1,
+ i8 9,
+ <vscale x 16 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 32 x i8> @intrinsic_vadd_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vi_nxv32i8_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.i8(
+ <vscale x 32 x i8> %0,
+ i8 9,
+ i32 %1)
+
+ ret <vscale x 32 x i8> %a
+}
+
+define <vscale x 32 x i8> @intrinsic_vadd_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv32i8_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 32 x i8> @llvm.riscv.vadd.mask.nxv32i8.i8(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8> %1,
+ i8 9,
+ <vscale x 32 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 32 x i8> %a
+}
+
+define <vscale x 64 x i8> @intrinsic_vadd_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vi_nxv64i8_nxv64i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.i8(
+ <vscale x 64 x i8> %0,
+ i8 9,
+ i32 %1)
+
+ ret <vscale x 64 x i8> %a
+}
+
+define <vscale x 64 x i8> @intrinsic_vadd_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv64i8_nxv64i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.i8(
+ <vscale x 64 x i8> %0,
+ <vscale x 64 x i8> %1,
+ i8 9,
+ <vscale x 64 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 64 x i8> %a
+}
+
+define <vscale x 1 x i16> @intrinsic_vadd_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vi_nxv1i16_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.i16(
+ <vscale x 1 x i16> %0,
+ i16 9,
+ i32 %1)
+
+ ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 1 x i16> @intrinsic_vadd_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv1i16_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 1 x i16> @llvm.riscv.vadd.mask.nxv1i16.i16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16> %1,
+ i16 9,
+ <vscale x 1 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 2 x i16> @intrinsic_vadd_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vi_nxv2i16_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.i16(
+ <vscale x 2 x i16> %0,
+ i16 9,
+ i32 %1)
+
+ ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 2 x i16> @intrinsic_vadd_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv2i16_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 2 x i16> @llvm.riscv.vadd.mask.nxv2i16.i16(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16> %1,
+ i16 9,
+ <vscale x 2 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 4 x i16> @intrinsic_vadd_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vi_nxv4i16_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.i16(
+ <vscale x 4 x i16> %0,
+ i16 9,
+ i32 %1)
+
+ ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 4 x i16> @intrinsic_vadd_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv4i16_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vadd.mask.nxv4i16.i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16> %1,
+ i16 9,
+ <vscale x 4 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 8 x i16> @intrinsic_vadd_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vi_nxv8i16_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.i16(
+ <vscale x 8 x i16> %0,
+ i16 9,
+ i32 %1)
+
+ ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 8 x i16> @intrinsic_vadd_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv8i16_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 8 x i16> @llvm.riscv.vadd.mask.nxv8i16.i16(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16> %1,
+ i16 9,
+ <vscale x 8 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 16 x i16> @intrinsic_vadd_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vi_nxv16i16_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.i16(
+ <vscale x 16 x i16> %0,
+ i16 9,
+ i32 %1)
+
+ ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 16 x i16> @intrinsic_vadd_mask_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv16i16_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.i16(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16> %1,
+ i16 9,
+ <vscale x 16 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 32 x i16> @intrinsic_vadd_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vi_nxv32i16_nxv32i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.i16(
+ <vscale x 32 x i16> %0,
+ i16 9,
+ i32 %1)
+
+ ret <vscale x 32 x i16> %a
+}
+
+define <vscale x 32 x i16> @intrinsic_vadd_mask_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv32i16_nxv32i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 32 x i16> @llvm.riscv.vadd.mask.nxv32i16.i16(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i16> %1,
+ i16 9,
+ <vscale x 32 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 32 x i16> %a
+}
+
+define <vscale x 1 x i32> @intrinsic_vadd_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vi_nxv1i32_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.i32(
+ <vscale x 1 x i32> %0,
+ i32 9,
+ i32 %1)
+
+ ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 1 x i32> @intrinsic_vadd_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv1i32_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32> %1,
+ i32 9,
+ <vscale x 1 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 2 x i32> @intrinsic_vadd_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vi_nxv2i32_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.i32(
+ <vscale x 2 x i32> %0,
+ i32 9,
+ i32 %1)
+
+ ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 2 x i32> @intrinsic_vadd_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv2i32_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32> %1,
+ i32 9,
+ <vscale x 2 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 4 x i32> @intrinsic_vadd_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vi_nxv4i32_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.i32(
+ <vscale x 4 x i32> %0,
+ i32 9,
+ i32 %1)
+
+ ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 4 x i32> @intrinsic_vadd_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv4i32_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32> %1,
+ i32 9,
+ <vscale x 4 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 8 x i32> @intrinsic_vadd_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vi_nxv8i32_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.i32(
+ <vscale x 8 x i32> %0,
+ i32 9,
+ i32 %1)
+
+ ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 8 x i32> @intrinsic_vadd_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv8i32_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 8 x i32> @llvm.riscv.vadd.mask.nxv8i32.i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32> %1,
+ i32 9,
+ <vscale x 8 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 16 x i32> @intrinsic_vadd_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vi_nxv16i32_nxv16i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.i32(
+ <vscale x 16 x i32> %0,
+ i32 9,
+ i32 %1)
+
+ ret <vscale x 16 x i32> %a
+}
+
+define <vscale x 16 x i32> @intrinsic_vadd_mask_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv16i32_nxv16i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.i32(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32> %1,
+ i32 9,
+ <vscale x 16 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 16 x i32> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vadd-rv64.ll
new file mode 100644
index 000000000000..490ab42d4683
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vadd-rv64.ll
@@ -0,0 +1,2377 @@
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ i64);
+
+define <vscale x 1 x i8> @intrinsic_vadd_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vv_nxv1i8_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i8> @intrinsic_vadd_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i8_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>,
+ i64);
+
+define <vscale x 2 x i8> @intrinsic_vadd_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vv_nxv2i8_nxv2i8_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vadd.mask.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i8> @intrinsic_vadd_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv2i8_nxv2i8_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i8> @llvm.riscv.vadd.mask.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8> %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>,
+ i64);
+
+define <vscale x 4 x i8> @intrinsic_vadd_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vv_nxv4i8_nxv4i8_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vadd.mask.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i8> @intrinsic_vadd_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv4i8_nxv4i8_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i8> @llvm.riscv.vadd.mask.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8> %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vadd_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vv_nxv8i8_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vadd_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv8i8_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>,
+ i64);
+
+define <vscale x 16 x i8> @intrinsic_vadd_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vv_nxv16i8_nxv16i8_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vadd.mask.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i8> @intrinsic_vadd_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv16i8_nxv16i8_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i8> @llvm.riscv.vadd.mask.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8> %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>,
+ i64);
+
+define <vscale x 32 x i8> @intrinsic_vadd_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vv_nxv32i8_nxv32i8_nxv32i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vadd.mask.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i1>,
+ i64);
+
+define <vscale x 32 x i8> @intrinsic_vadd_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv32i8_nxv32i8_nxv32i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 32 x i8> @llvm.riscv.vadd.mask.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8> %1,
+ <vscale x 32 x i8> %2,
+ <vscale x 32 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
+ <vscale x 64 x i8>,
+ i64);
+
+define <vscale x 64 x i8> @intrinsic_vadd_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vv_nxv64i8_nxv64i8_nxv64i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> %0,
+ <vscale x 64 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
+ <vscale x 64 x i8>,
+ <vscale x 64 x i8>,
+ <vscale x 64 x i1>,
+ i64);
+
+define <vscale x 64 x i8> @intrinsic_vadd_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv64i8_nxv64i8_nxv64i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> %0,
+ <vscale x 64 x i8> %1,
+ <vscale x 64 x i8> %2,
+ <vscale x 64 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>,
+ i64);
+
+define <vscale x 1 x i16> @intrinsic_vadd_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vv_nxv1i16_nxv1i16_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vadd.mask.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i16> @intrinsic_vadd_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i16_nxv1i16_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i16> @llvm.riscv.vadd.mask.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16> %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>,
+ i64);
+
+define <vscale x 2 x i16> @intrinsic_vadd_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vv_nxv2i16_nxv2i16_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vadd.mask.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i16> @intrinsic_vadd_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv2i16_nxv2i16_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i16> @llvm.riscv.vadd.mask.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16> %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vadd_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vv_nxv4i16_nxv4i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vadd.mask.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vadd_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv4i16_nxv4i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vadd.mask.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>,
+ i64);
+
+define <vscale x 8 x i16> @intrinsic_vadd_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vv_nxv8i16_nxv8i16_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vadd.mask.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i16> @intrinsic_vadd_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv8i16_nxv8i16_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i16> @llvm.riscv.vadd.mask.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16> %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>,
+ i64);
+
+define <vscale x 16 x i16> @intrinsic_vadd_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vv_nxv16i16_nxv16i16_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i16> @intrinsic_vadd_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv16i16_nxv16i16_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16> %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>,
+ i64);
+
+define <vscale x 32 x i16> @intrinsic_vadd_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vv_nxv32i16_nxv32i16_nxv32i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vadd.mask.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>,
+ <vscale x 32 x i1>,
+ i64);
+
+define <vscale x 32 x i16> @intrinsic_vadd_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv32i16_nxv32i16_nxv32i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 32 x i16> @llvm.riscv.vadd.mask.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i16> %1,
+ <vscale x 32 x i16> %2,
+ <vscale x 32 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>,
+ i64);
+
+define <vscale x 1 x i32> @intrinsic_vadd_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vv_nxv1i32_nxv1i32_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i32> @intrinsic_vadd_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i32_nxv1i32_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32> %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vadd_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vv_nxv2i32_nxv2i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vadd_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv2i32_nxv2i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>,
+ i64);
+
+define <vscale x 4 x i32> @intrinsic_vadd_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vv_nxv4i32_nxv4i32_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i32> @intrinsic_vadd_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv4i32_nxv4i32_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32> %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>,
+ i64);
+
+define <vscale x 8 x i32> @intrinsic_vadd_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vv_nxv8i32_nxv8i32_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vadd.mask.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i32> @intrinsic_vadd_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv8i32_nxv8i32_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i32> @llvm.riscv.vadd.mask.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32> %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>,
+ i64);
+
+define <vscale x 16 x i32> @intrinsic_vadd_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vv_nxv16i32_nxv16i32_nxv16i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i32> @intrinsic_vadd_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv16i32_nxv16i32_nxv16i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32> %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vadd_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vv_nxv1i64_nxv1i64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vadd_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i64_nxv1i64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64> %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>,
+ i64);
+
+define <vscale x 2 x i64> @intrinsic_vadd_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vv_nxv2i64_nxv2i64_nxv2i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> %0,
+ <vscale x 2 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vadd.mask.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i64> @intrinsic_vadd_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv2i64_nxv2i64_nxv2i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i64> @llvm.riscv.vadd.mask.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> %0,
+ <vscale x 2 x i64> %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>,
+ i64);
+
+define <vscale x 4 x i64> @intrinsic_vadd_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vv_nxv4i64_nxv4i64_nxv4i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> %0,
+ <vscale x 4 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vadd.mask.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i64> @intrinsic_vadd_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv4i64_nxv4i64_nxv4i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i64> @llvm.riscv.vadd.mask.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> %0,
+ <vscale x 4 x i64> %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>,
+ <vscale x 8 x i64>,
+ i64);
+
+define <vscale x 8 x i64> @intrinsic_vadd_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vv_nxv8i64_nxv8i64_nxv8i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> %0,
+ <vscale x 8 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vadd.mask.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i64> @intrinsic_vadd_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv8i64_nxv8i64_nxv8i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i64> @llvm.riscv.vadd.mask.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> %0,
+ <vscale x 8 x i64> %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.i8(
+ <vscale x 1 x i8>,
+ i8,
+ i64);
+
+define <vscale x 1 x i8> @intrinsic_vadd_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vx_nxv1i8_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.i8(
+ <vscale x 1 x i8> %0,
+ i8 %1,
+ i64 %2)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ i8,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i8> @intrinsic_vadd_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv1i8_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ i8 %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.i8(
+ <vscale x 2 x i8>,
+ i8,
+ i64);
+
+define <vscale x 2 x i8> @intrinsic_vadd_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vx_nxv2i8_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.i8(
+ <vscale x 2 x i8> %0,
+ i8 %1,
+ i64 %2)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vadd.mask.nxv2i8.i8(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>,
+ i8,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i8> @intrinsic_vadd_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv2i8_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i8> @llvm.riscv.vadd.mask.nxv2i8.i8(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8> %1,
+ i8 %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.i8(
+ <vscale x 4 x i8>,
+ i8,
+ i64);
+
+define <vscale x 4 x i8> @intrinsic_vadd_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vx_nxv4i8_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.i8(
+ <vscale x 4 x i8> %0,
+ i8 %1,
+ i64 %2)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vadd.mask.nxv4i8.i8(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>,
+ i8,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i8> @intrinsic_vadd_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv4i8_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i8> @llvm.riscv.vadd.mask.nxv4i8.i8(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8> %1,
+ i8 %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.i8(
+ <vscale x 8 x i8>,
+ i8,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vadd_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vx_nxv8i8_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.i8(
+ <vscale x 8 x i8> %0,
+ i8 %1,
+ i64 %2)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.i8(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ i8,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vadd_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv8i8_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8> %1,
+ i8 %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.i8(
+ <vscale x 16 x i8>,
+ i8,
+ i64);
+
+define <vscale x 16 x i8> @intrinsic_vadd_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vx_nxv16i8_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.i8(
+ <vscale x 16 x i8> %0,
+ i8 %1,
+ i64 %2)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vadd.mask.nxv16i8.i8(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>,
+ i8,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i8> @intrinsic_vadd_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv16i8_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i8> @llvm.riscv.vadd.mask.nxv16i8.i8(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8> %1,
+ i8 %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.i8(
+ <vscale x 32 x i8>,
+ i8,
+ i64);
+
+define <vscale x 32 x i8> @intrinsic_vadd_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vx_nxv32i8_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.i8(
+ <vscale x 32 x i8> %0,
+ i8 %1,
+ i64 %2)
+
+ ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vadd.mask.nxv32i8.i8(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>,
+ i8,
+ <vscale x 32 x i1>,
+ i64);
+
+define <vscale x 32 x i8> @intrinsic_vadd_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv32i8_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 32 x i8> @llvm.riscv.vadd.mask.nxv32i8.i8(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8> %1,
+ i8 %2,
+ <vscale x 32 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.i8(
+ <vscale x 64 x i8>,
+ i8,
+ i64);
+
+define <vscale x 64 x i8> @intrinsic_vadd_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vx_nxv64i8_nxv64i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.i8(
+ <vscale x 64 x i8> %0,
+ i8 %1,
+ i64 %2)
+
+ ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.i8(
+ <vscale x 64 x i8>,
+ <vscale x 64 x i8>,
+ i8,
+ <vscale x 64 x i1>,
+ i64);
+
+define <vscale x 64 x i8> @intrinsic_vadd_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv64i8_nxv64i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.i8(
+ <vscale x 64 x i8> %0,
+ <vscale x 64 x i8> %1,
+ i8 %2,
+ <vscale x 64 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.i16(
+ <vscale x 1 x i16>,
+ i16,
+ i64);
+
+define <vscale x 1 x i16> @intrinsic_vadd_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vx_nxv1i16_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.i16(
+ <vscale x 1 x i16> %0,
+ i16 %1,
+ i64 %2)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vadd.mask.nxv1i16.i16(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>,
+ i16,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i16> @intrinsic_vadd_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv1i16_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i16> @llvm.riscv.vadd.mask.nxv1i16.i16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16> %1,
+ i16 %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.i16(
+ <vscale x 2 x i16>,
+ i16,
+ i64);
+
+define <vscale x 2 x i16> @intrinsic_vadd_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vx_nxv2i16_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.i16(
+ <vscale x 2 x i16> %0,
+ i16 %1,
+ i64 %2)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vadd.mask.nxv2i16.i16(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>,
+ i16,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i16> @intrinsic_vadd_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv2i16_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i16> @llvm.riscv.vadd.mask.nxv2i16.i16(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16> %1,
+ i16 %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.i16(
+ <vscale x 4 x i16>,
+ i16,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vadd_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vx_nxv4i16_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.i16(
+ <vscale x 4 x i16> %0,
+ i16 %1,
+ i64 %2)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vadd.mask.nxv4i16.i16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ i16,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vadd_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv4i16_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vadd.mask.nxv4i16.i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16> %1,
+ i16 %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.i16(
+ <vscale x 8 x i16>,
+ i16,
+ i64);
+
+define <vscale x 8 x i16> @intrinsic_vadd_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vx_nxv8i16_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.i16(
+ <vscale x 8 x i16> %0,
+ i16 %1,
+ i64 %2)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vadd.mask.nxv8i16.i16(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>,
+ i16,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i16> @intrinsic_vadd_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv8i16_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i16> @llvm.riscv.vadd.mask.nxv8i16.i16(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16> %1,
+ i16 %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.i16(
+ <vscale x 16 x i16>,
+ i16,
+ i64);
+
+define <vscale x 16 x i16> @intrinsic_vadd_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vx_nxv16i16_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.i16(
+ <vscale x 16 x i16> %0,
+ i16 %1,
+ i64 %2)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.i16(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>,
+ i16,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i16> @intrinsic_vadd_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv16i16_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.i16(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16> %1,
+ i16 %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.i16(
+ <vscale x 32 x i16>,
+ i16,
+ i64);
+
+define <vscale x 32 x i16> @intrinsic_vadd_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vx_nxv32i16_nxv32i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.i16(
+ <vscale x 32 x i16> %0,
+ i16 %1,
+ i64 %2)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vadd.mask.nxv32i16.i16(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>,
+ i16,
+ <vscale x 32 x i1>,
+ i64);
+
+define <vscale x 32 x i16> @intrinsic_vadd_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv32i16_nxv32i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 32 x i16> @llvm.riscv.vadd.mask.nxv32i16.i16(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i16> %1,
+ i16 %2,
+ <vscale x 32 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.i32(
+ <vscale x 1 x i32>,
+ i32,
+ i64);
+
+define <vscale x 1 x i32> @intrinsic_vadd_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vx_nxv1i32_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.i32(
+ <vscale x 1 x i32> %0,
+ i32 %1,
+ i64 %2)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.i32(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>,
+ i32,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i32> @intrinsic_vadd_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv1i32_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32> %1,
+ i32 %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.i32(
+ <vscale x 2 x i32>,
+ i32,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vadd_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vx_nxv2i32_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.i32(
+ <vscale x 2 x i32> %0,
+ i32 %1,
+ i64 %2)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.i32(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ i32,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vadd_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv2i32_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32> %1,
+ i32 %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.i32(
+ <vscale x 4 x i32>,
+ i32,
+ i64);
+
+define <vscale x 4 x i32> @intrinsic_vadd_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vx_nxv4i32_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.i32(
+ <vscale x 4 x i32> %0,
+ i32 %1,
+ i64 %2)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.i32(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>,
+ i32,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i32> @intrinsic_vadd_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv4i32_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32> %1,
+ i32 %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.i32(
+ <vscale x 8 x i32>,
+ i32,
+ i64);
+
+define <vscale x 8 x i32> @intrinsic_vadd_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vx_nxv8i32_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.i32(
+ <vscale x 8 x i32> %0,
+ i32 %1,
+ i64 %2)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vadd.mask.nxv8i32.i32(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>,
+ i32,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i32> @intrinsic_vadd_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv8i32_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i32> @llvm.riscv.vadd.mask.nxv8i32.i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32> %1,
+ i32 %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.i32(
+ <vscale x 16 x i32>,
+ i32,
+ i64);
+
+define <vscale x 16 x i32> @intrinsic_vadd_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vx_nxv16i32_nxv16i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.i32(
+ <vscale x 16 x i32> %0,
+ i32 %1,
+ i64 %2)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.i32(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>,
+ i32,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i32> @intrinsic_vadd_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv16i32_nxv16i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.i32(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32> %1,
+ i32 %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.i64(
+ <vscale x 1 x i64>,
+ i64,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vadd_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vx_nxv1i64_nxv1i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.i64(
+ <vscale x 1 x i64> %0,
+ i64 %1,
+ i64 %2)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.i64(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ i64,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vadd_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv1i64_nxv1i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.i64(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64> %1,
+ i64 %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.i64(
+ <vscale x 2 x i64>,
+ i64,
+ i64);
+
+define <vscale x 2 x i64> @intrinsic_vadd_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vx_nxv2i64_nxv2i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.i64(
+ <vscale x 2 x i64> %0,
+ i64 %1,
+ i64 %2)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vadd.mask.nxv2i64.i64(
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>,
+ i64,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i64> @intrinsic_vadd_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv2i64_nxv2i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i64> @llvm.riscv.vadd.mask.nxv2i64.i64(
+ <vscale x 2 x i64> %0,
+ <vscale x 2 x i64> %1,
+ i64 %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.i64(
+ <vscale x 4 x i64>,
+ i64,
+ i64);
+
+define <vscale x 4 x i64> @intrinsic_vadd_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vx_nxv4i64_nxv4i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.i64(
+ <vscale x 4 x i64> %0,
+ i64 %1,
+ i64 %2)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vadd.mask.nxv4i64.i64(
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>,
+ i64,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i64> @intrinsic_vadd_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv4i64_nxv4i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i64> @llvm.riscv.vadd.mask.nxv4i64.i64(
+ <vscale x 4 x i64> %0,
+ <vscale x 4 x i64> %1,
+ i64 %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.i64(
+ <vscale x 8 x i64>,
+ i64,
+ i64);
+
+define <vscale x 8 x i64> @intrinsic_vadd_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vx_nxv8i64_nxv8i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.i64(
+ <vscale x 8 x i64> %0,
+ i64 %1,
+ i64 %2)
+
+ ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vadd.mask.nxv8i64.i64(
+ <vscale x 8 x i64>,
+ <vscale x 8 x i64>,
+ i64,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i64> @intrinsic_vadd_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv8i64_nxv8i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i64> @llvm.riscv.vadd.mask.nxv8i64.i64(
+ <vscale x 8 x i64> %0,
+ <vscale x 8 x i64> %1,
+ i64 %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i64> %a
+}
+
+define <vscale x 1 x i8> @intrinsic_vadd_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vi_nxv1i8_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.i8(
+ <vscale x 1 x i8> %0,
+ i8 9,
+ i64 %1)
+
+ ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 1 x i8> @intrinsic_vadd_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv1i8_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ i8 9,
+ <vscale x 1 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_vadd_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vi_nxv2i8_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.i8(
+ <vscale x 2 x i8> %0,
+ i8 9,
+ i64 %1)
+
+ ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_vadd_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv2i8_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 2 x i8> @llvm.riscv.vadd.mask.nxv2i8.i8(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8> %1,
+ i8 9,
+ <vscale x 2 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_vadd_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vi_nxv4i8_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.i8(
+ <vscale x 4 x i8> %0,
+ i8 9,
+ i64 %1)
+
+ ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_vadd_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv4i8_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 4 x i8> @llvm.riscv.vadd.mask.nxv4i8.i8(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8> %1,
+ i8 9,
+ <vscale x 4 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vadd_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vi_nxv8i8_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.i8(
+ <vscale x 8 x i8> %0,
+ i8 9,
+ i64 %1)
+
+ ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vadd_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv8i8_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8> %1,
+ i8 9,
+ <vscale x 8 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_vadd_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vi_nxv16i8_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.i8(
+ <vscale x 16 x i8> %0,
+ i8 9,
+ i64 %1)
+
+ ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_vadd_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv16i8_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 16 x i8> @llvm.riscv.vadd.mask.nxv16i8.i8(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8> %1,
+ i8 9,
+ <vscale x 16 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 32 x i8> @intrinsic_vadd_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vi_nxv32i8_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.i8(
+ <vscale x 32 x i8> %0,
+ i8 9,
+ i64 %1)
+
+ ret <vscale x 32 x i8> %a
+}
+
+define <vscale x 32 x i8> @intrinsic_vadd_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv32i8_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 32 x i8> @llvm.riscv.vadd.mask.nxv32i8.i8(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8> %1,
+ i8 9,
+ <vscale x 32 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 32 x i8> %a
+}
+
+define <vscale x 64 x i8> @intrinsic_vadd_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vi_nxv64i8_nxv64i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.i8(
+ <vscale x 64 x i8> %0,
+ i8 9,
+ i64 %1)
+
+ ret <vscale x 64 x i8> %a
+}
+
+define <vscale x 64 x i8> @intrinsic_vadd_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv64i8_nxv64i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.i8(
+ <vscale x 64 x i8> %0,
+ <vscale x 64 x i8> %1,
+ i8 9,
+ <vscale x 64 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 64 x i8> %a
+}
+
+define <vscale x 1 x i16> @intrinsic_vadd_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vi_nxv1i16_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.i16(
+ <vscale x 1 x i16> %0,
+ i16 9,
+ i64 %1)
+
+ ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 1 x i16> @intrinsic_vadd_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv1i16_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 1 x i16> @llvm.riscv.vadd.mask.nxv1i16.i16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16> %1,
+ i16 9,
+ <vscale x 1 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 2 x i16> @intrinsic_vadd_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vi_nxv2i16_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.i16(
+ <vscale x 2 x i16> %0,
+ i16 9,
+ i64 %1)
+
+ ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 2 x i16> @intrinsic_vadd_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv2i16_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 2 x i16> @llvm.riscv.vadd.mask.nxv2i16.i16(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16> %1,
+ i16 9,
+ <vscale x 2 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 4 x i16> @intrinsic_vadd_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vi_nxv4i16_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.i16(
+ <vscale x 4 x i16> %0,
+ i16 9,
+ i64 %1)
+
+ ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 4 x i16> @intrinsic_vadd_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv4i16_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vadd.mask.nxv4i16.i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16> %1,
+ i16 9,
+ <vscale x 4 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 8 x i16> @intrinsic_vadd_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vi_nxv8i16_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.i16(
+ <vscale x 8 x i16> %0,
+ i16 9,
+ i64 %1)
+
+ ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 8 x i16> @intrinsic_vadd_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv8i16_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 8 x i16> @llvm.riscv.vadd.mask.nxv8i16.i16(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16> %1,
+ i16 9,
+ <vscale x 8 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 16 x i16> @intrinsic_vadd_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vi_nxv16i16_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.i16(
+ <vscale x 16 x i16> %0,
+ i16 9,
+ i64 %1)
+
+ ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 16 x i16> @intrinsic_vadd_mask_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv16i16_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.i16(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16> %1,
+ i16 9,
+ <vscale x 16 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 32 x i16> @intrinsic_vadd_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vi_nxv32i16_nxv32i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.i16(
+ <vscale x 32 x i16> %0,
+ i16 9,
+ i64 %1)
+
+ ret <vscale x 32 x i16> %a
+}
+
+define <vscale x 32 x i16> @intrinsic_vadd_mask_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv32i16_nxv32i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 32 x i16> @llvm.riscv.vadd.mask.nxv32i16.i16(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i16> %1,
+ i16 9,
+ <vscale x 32 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 32 x i16> %a
+}
+
+define <vscale x 1 x i32> @intrinsic_vadd_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vi_nxv1i32_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.i32(
+ <vscale x 1 x i32> %0,
+ i32 9,
+ i64 %1)
+
+ ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 1 x i32> @intrinsic_vadd_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv1i32_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32> %1,
+ i32 9,
+ <vscale x 1 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 2 x i32> @intrinsic_vadd_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vi_nxv2i32_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.i32(
+ <vscale x 2 x i32> %0,
+ i32 9,
+ i64 %1)
+
+ ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 2 x i32> @intrinsic_vadd_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv2i32_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32> %1,
+ i32 9,
+ <vscale x 2 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 4 x i32> @intrinsic_vadd_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vi_nxv4i32_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.i32(
+ <vscale x 4 x i32> %0,
+ i32 9,
+ i64 %1)
+
+ ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 4 x i32> @intrinsic_vadd_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv4i32_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32> %1,
+ i32 9,
+ <vscale x 4 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 8 x i32> @intrinsic_vadd_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vi_nxv8i32_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.i32(
+ <vscale x 8 x i32> %0,
+ i32 9,
+ i64 %1)
+
+ ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 8 x i32> @intrinsic_vadd_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv8i32_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 8 x i32> @llvm.riscv.vadd.mask.nxv8i32.i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32> %1,
+ i32 9,
+ <vscale x 8 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 16 x i32> @intrinsic_vadd_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vi_nxv16i32_nxv16i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.i32(
+ <vscale x 16 x i32> %0,
+ i32 9,
+ i64 %1)
+
+ ret <vscale x 16 x i32> %a
+}
+
+define <vscale x 16 x i32> @intrinsic_vadd_mask_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv16i32_nxv16i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.i32(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32> %1,
+ i32 9,
+ <vscale x 16 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 16 x i32> %a
+}
+
+define <vscale x 1 x i64> @intrinsic_vadd_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vi_nxv1i64_nxv1i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.i64(
+ <vscale x 1 x i64> %0,
+ i64 9,
+ i64 %1)
+
+ ret <vscale x 1 x i64> %a
+}
+
+define <vscale x 1 x i64> @intrinsic_vadd_mask_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv1i64_nxv1i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.i64(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64> %1,
+ i64 9,
+ <vscale x 1 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i64> %a
+}
+
+define <vscale x 2 x i64> @intrinsic_vadd_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vi_nxv2i64_nxv2i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.i64(
+ <vscale x 2 x i64> %0,
+ i64 9,
+ i64 %1)
+
+ ret <vscale x 2 x i64> %a
+}
+
+define <vscale x 2 x i64> @intrinsic_vadd_mask_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv2i64_nxv2i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 2 x i64> @llvm.riscv.vadd.mask.nxv2i64.i64(
+ <vscale x 2 x i64> %0,
+ <vscale x 2 x i64> %1,
+ i64 9,
+ <vscale x 2 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i64> %a
+}
+
+define <vscale x 4 x i64> @intrinsic_vadd_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vi_nxv4i64_nxv4i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.i64(
+ <vscale x 4 x i64> %0,
+ i64 9,
+ i64 %1)
+
+ ret <vscale x 4 x i64> %a
+}
+
+define <vscale x 4 x i64> @intrinsic_vadd_mask_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv4i64_nxv4i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 4 x i64> @llvm.riscv.vadd.mask.nxv4i64.i64(
+ <vscale x 4 x i64> %0,
+ <vscale x 4 x i64> %1,
+ i64 9,
+ <vscale x 4 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i64> %a
+}
+
+define <vscale x 8 x i64> @intrinsic_vadd_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_vi_nxv8i64_nxv8i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.i64(
+ <vscale x 8 x i64> %0,
+ i64 9,
+ i64 %1)
+
+ ret <vscale x 8 x i64> %a
+}
+
+define <vscale x 8 x i64> @intrinsic_vadd_mask_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv8i64_nxv8i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 8 x i64> @llvm.riscv.vadd.mask.nxv8i64.i64(
+ <vscale x 8 x i64> %0,
+ <vscale x 8 x i64> %1,
+ i64 9,
+ <vscale x 8 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i64> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vrsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vrsub-rv32.ll
new file mode 100644
index 000000000000..b5d3a539a0bb
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vrsub-rv32.ll
@@ -0,0 +1,1225 @@
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i8> @llvm.riscv.vrsub.nxv1i8.i8(
+ <vscale x 1 x i8>,
+ i8,
+ i32);
+
+define <vscale x 1 x i8> @intrinsic_vrsub_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_vx_nxv1i8_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 1 x i8> @llvm.riscv.vrsub.nxv1i8.i8(
+ <vscale x 1 x i8> %0,
+ i8 %1,
+ i32 %2)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vrsub.mask.nxv1i8.i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ i8,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i8> @intrinsic_vrsub_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv1i8_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i8> @llvm.riscv.vrsub.mask.nxv1i8.i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ i8 %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vrsub.nxv2i8.i8(
+ <vscale x 2 x i8>,
+ i8,
+ i32);
+
+define <vscale x 2 x i8> @intrinsic_vrsub_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_vx_nxv2i8_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 2 x i8> @llvm.riscv.vrsub.nxv2i8.i8(
+ <vscale x 2 x i8> %0,
+ i8 %1,
+ i32 %2)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vrsub.mask.nxv2i8.i8(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>,
+ i8,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i8> @intrinsic_vrsub_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv2i8_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i8> @llvm.riscv.vrsub.mask.nxv2i8.i8(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8> %1,
+ i8 %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vrsub.nxv4i8.i8(
+ <vscale x 4 x i8>,
+ i8,
+ i32);
+
+define <vscale x 4 x i8> @intrinsic_vrsub_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_vx_nxv4i8_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 4 x i8> @llvm.riscv.vrsub.nxv4i8.i8(
+ <vscale x 4 x i8> %0,
+ i8 %1,
+ i32 %2)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vrsub.mask.nxv4i8.i8(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>,
+ i8,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i8> @intrinsic_vrsub_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv4i8_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i8> @llvm.riscv.vrsub.mask.nxv4i8.i8(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8> %1,
+ i8 %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vrsub.nxv8i8.i8(
+ <vscale x 8 x i8>,
+ i8,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vrsub_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_vx_nxv8i8_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vrsub.nxv8i8.i8(
+ <vscale x 8 x i8> %0,
+ i8 %1,
+ i32 %2)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vrsub.mask.nxv8i8.i8(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ i8,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vrsub_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv8i8_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vrsub.mask.nxv8i8.i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8> %1,
+ i8 %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vrsub.nxv16i8.i8(
+ <vscale x 16 x i8>,
+ i8,
+ i32);
+
+define <vscale x 16 x i8> @intrinsic_vrsub_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_vx_nxv16i8_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 16 x i8> @llvm.riscv.vrsub.nxv16i8.i8(
+ <vscale x 16 x i8> %0,
+ i8 %1,
+ i32 %2)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vrsub.mask.nxv16i8.i8(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>,
+ i8,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i8> @intrinsic_vrsub_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv16i8_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i8> @llvm.riscv.vrsub.mask.nxv16i8.i8(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8> %1,
+ i8 %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vrsub.nxv32i8.i8(
+ <vscale x 32 x i8>,
+ i8,
+ i32);
+
+define <vscale x 32 x i8> @intrinsic_vrsub_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_vx_nxv32i8_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 32 x i8> @llvm.riscv.vrsub.nxv32i8.i8(
+ <vscale x 32 x i8> %0,
+ i8 %1,
+ i32 %2)
+
+ ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vrsub.mask.nxv32i8.i8(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>,
+ i8,
+ <vscale x 32 x i1>,
+ i32);
+
+define <vscale x 32 x i8> @intrinsic_vrsub_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv32i8_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 32 x i8> @llvm.riscv.vrsub.mask.nxv32i8.i8(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8> %1,
+ i8 %2,
+ <vscale x 32 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vrsub.nxv64i8.i8(
+ <vscale x 64 x i8>,
+ i8,
+ i32);
+
+define <vscale x 64 x i8> @intrinsic_vrsub_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_vx_nxv64i8_nxv64i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 64 x i8> @llvm.riscv.vrsub.nxv64i8.i8(
+ <vscale x 64 x i8> %0,
+ i8 %1,
+ i32 %2)
+
+ ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vrsub.mask.nxv64i8.i8(
+ <vscale x 64 x i8>,
+ <vscale x 64 x i8>,
+ i8,
+ <vscale x 64 x i1>,
+ i32);
+
+define <vscale x 64 x i8> @intrinsic_vrsub_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv64i8_nxv64i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 64 x i8> @llvm.riscv.vrsub.mask.nxv64i8.i8(
+ <vscale x 64 x i8> %0,
+ <vscale x 64 x i8> %1,
+ i8 %2,
+ <vscale x 64 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vrsub.nxv1i16.i16(
+ <vscale x 1 x i16>,
+ i16,
+ i32);
+
+define <vscale x 1 x i16> @intrinsic_vrsub_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_vx_nxv1i16_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 1 x i16> @llvm.riscv.vrsub.nxv1i16.i16(
+ <vscale x 1 x i16> %0,
+ i16 %1,
+ i32 %2)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vrsub.mask.nxv1i16.i16(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>,
+ i16,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i16> @intrinsic_vrsub_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv1i16_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i16> @llvm.riscv.vrsub.mask.nxv1i16.i16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16> %1,
+ i16 %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vrsub.nxv2i16.i16(
+ <vscale x 2 x i16>,
+ i16,
+ i32);
+
+define <vscale x 2 x i16> @intrinsic_vrsub_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_vx_nxv2i16_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 2 x i16> @llvm.riscv.vrsub.nxv2i16.i16(
+ <vscale x 2 x i16> %0,
+ i16 %1,
+ i32 %2)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vrsub.mask.nxv2i16.i16(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>,
+ i16,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i16> @intrinsic_vrsub_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv2i16_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i16> @llvm.riscv.vrsub.mask.nxv2i16.i16(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16> %1,
+ i16 %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vrsub.nxv4i16.i16(
+ <vscale x 4 x i16>,
+ i16,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vrsub_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_vx_nxv4i16_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vrsub.nxv4i16.i16(
+ <vscale x 4 x i16> %0,
+ i16 %1,
+ i32 %2)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vrsub.mask.nxv4i16.i16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ i16,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vrsub_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv4i16_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vrsub.mask.nxv4i16.i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16> %1,
+ i16 %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vrsub.nxv8i16.i16(
+ <vscale x 8 x i16>,
+ i16,
+ i32);
+
+define <vscale x 8 x i16> @intrinsic_vrsub_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_vx_nxv8i16_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 8 x i16> @llvm.riscv.vrsub.nxv8i16.i16(
+ <vscale x 8 x i16> %0,
+ i16 %1,
+ i32 %2)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vrsub.mask.nxv8i16.i16(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>,
+ i16,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i16> @intrinsic_vrsub_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv8i16_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i16> @llvm.riscv.vrsub.mask.nxv8i16.i16(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16> %1,
+ i16 %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vrsub.nxv16i16.i16(
+ <vscale x 16 x i16>,
+ i16,
+ i32);
+
+define <vscale x 16 x i16> @intrinsic_vrsub_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_vx_nxv16i16_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 16 x i16> @llvm.riscv.vrsub.nxv16i16.i16(
+ <vscale x 16 x i16> %0,
+ i16 %1,
+ i32 %2)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vrsub.mask.nxv16i16.i16(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>,
+ i16,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i16> @intrinsic_vrsub_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv16i16_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i16> @llvm.riscv.vrsub.mask.nxv16i16.i16(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16> %1,
+ i16 %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vrsub.nxv32i16.i16(
+ <vscale x 32 x i16>,
+ i16,
+ i32);
+
+define <vscale x 32 x i16> @intrinsic_vrsub_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_vx_nxv32i16_nxv32i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 32 x i16> @llvm.riscv.vrsub.nxv32i16.i16(
+ <vscale x 32 x i16> %0,
+ i16 %1,
+ i32 %2)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vrsub.mask.nxv32i16.i16(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>,
+ i16,
+ <vscale x 32 x i1>,
+ i32);
+
+define <vscale x 32 x i16> @intrinsic_vrsub_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv32i16_nxv32i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 32 x i16> @llvm.riscv.vrsub.mask.nxv32i16.i16(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i16> %1,
+ i16 %2,
+ <vscale x 32 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vrsub.nxv1i32.i32(
+ <vscale x 1 x i32>,
+ i32,
+ i32);
+
+define <vscale x 1 x i32> @intrinsic_vrsub_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_vx_nxv1i32_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 1 x i32> @llvm.riscv.vrsub.nxv1i32.i32(
+ <vscale x 1 x i32> %0,
+ i32 %1,
+ i32 %2)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vrsub.mask.nxv1i32.i32(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>,
+ i32,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i32> @intrinsic_vrsub_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv1i32_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i32> @llvm.riscv.vrsub.mask.nxv1i32.i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32> %1,
+ i32 %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vrsub.nxv2i32.i32(
+ <vscale x 2 x i32>,
+ i32,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vrsub_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_vx_nxv2i32_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vrsub.nxv2i32.i32(
+ <vscale x 2 x i32> %0,
+ i32 %1,
+ i32 %2)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vrsub.mask.nxv2i32.i32(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ i32,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vrsub_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv2i32_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vrsub.mask.nxv2i32.i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32> %1,
+ i32 %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vrsub.nxv4i32.i32(
+ <vscale x 4 x i32>,
+ i32,
+ i32);
+
+define <vscale x 4 x i32> @intrinsic_vrsub_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_vx_nxv4i32_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 4 x i32> @llvm.riscv.vrsub.nxv4i32.i32(
+ <vscale x 4 x i32> %0,
+ i32 %1,
+ i32 %2)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vrsub.mask.nxv4i32.i32(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>,
+ i32,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i32> @intrinsic_vrsub_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv4i32_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i32> @llvm.riscv.vrsub.mask.nxv4i32.i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32> %1,
+ i32 %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vrsub.nxv8i32.i32(
+ <vscale x 8 x i32>,
+ i32,
+ i32);
+
+define <vscale x 8 x i32> @intrinsic_vrsub_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_vx_nxv8i32_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 8 x i32> @llvm.riscv.vrsub.nxv8i32.i32(
+ <vscale x 8 x i32> %0,
+ i32 %1,
+ i32 %2)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vrsub.mask.nxv8i32.i32(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>,
+ i32,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i32> @intrinsic_vrsub_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv8i32_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i32> @llvm.riscv.vrsub.mask.nxv8i32.i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32> %1,
+ i32 %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vrsub.nxv16i32.i32(
+ <vscale x 16 x i32>,
+ i32,
+ i32);
+
+define <vscale x 16 x i32> @intrinsic_vrsub_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_vx_nxv16i32_nxv16i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 16 x i32> @llvm.riscv.vrsub.nxv16i32.i32(
+ <vscale x 16 x i32> %0,
+ i32 %1,
+ i32 %2)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vrsub.mask.nxv16i32.i32(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>,
+ i32,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i32> @intrinsic_vrsub_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv16i32_nxv16i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i32> @llvm.riscv.vrsub.mask.nxv16i32.i32(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32> %1,
+ i32 %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i32> %a
+}
+
+define <vscale x 1 x i8> @intrinsic_vrsub_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_vi_nxv1i8_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 1 x i8> @llvm.riscv.vrsub.nxv1i8.i8(
+ <vscale x 1 x i8> %0,
+ i8 9,
+ i32 %1)
+
+ ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 1 x i8> @intrinsic_vrsub_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv1i8_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 1 x i8> @llvm.riscv.vrsub.mask.nxv1i8.i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ i8 9,
+ <vscale x 1 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_vrsub_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_vi_nxv2i8_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 2 x i8> @llvm.riscv.vrsub.nxv2i8.i8(
+ <vscale x 2 x i8> %0,
+ i8 9,
+ i32 %1)
+
+ ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_vrsub_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv2i8_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 2 x i8> @llvm.riscv.vrsub.mask.nxv2i8.i8(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8> %1,
+ i8 9,
+ <vscale x 2 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_vrsub_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_vi_nxv4i8_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 4 x i8> @llvm.riscv.vrsub.nxv4i8.i8(
+ <vscale x 4 x i8> %0,
+ i8 9,
+ i32 %1)
+
+ ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_vrsub_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv4i8_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 4 x i8> @llvm.riscv.vrsub.mask.nxv4i8.i8(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8> %1,
+ i8 9,
+ <vscale x 4 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vrsub_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_vi_nxv8i8_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 8 x i8> @llvm.riscv.vrsub.nxv8i8.i8(
+ <vscale x 8 x i8> %0,
+ i8 9,
+ i32 %1)
+
+ ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vrsub_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv8i8_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vrsub.mask.nxv8i8.i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8> %1,
+ i8 9,
+ <vscale x 8 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_vrsub_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_vi_nxv16i8_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 16 x i8> @llvm.riscv.vrsub.nxv16i8.i8(
+ <vscale x 16 x i8> %0,
+ i8 9,
+ i32 %1)
+
+ ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_vrsub_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv16i8_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 16 x i8> @llvm.riscv.vrsub.mask.nxv16i8.i8(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8> %1,
+ i8 9,
+ <vscale x 16 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 32 x i8> @intrinsic_vrsub_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_vi_nxv32i8_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 32 x i8> @llvm.riscv.vrsub.nxv32i8.i8(
+ <vscale x 32 x i8> %0,
+ i8 9,
+ i32 %1)
+
+ ret <vscale x 32 x i8> %a
+}
+
+define <vscale x 32 x i8> @intrinsic_vrsub_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv32i8_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 32 x i8> @llvm.riscv.vrsub.mask.nxv32i8.i8(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8> %1,
+ i8 9,
+ <vscale x 32 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 32 x i8> %a
+}
+
+define <vscale x 64 x i8> @intrinsic_vrsub_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_vi_nxv64i8_nxv64i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 64 x i8> @llvm.riscv.vrsub.nxv64i8.i8(
+ <vscale x 64 x i8> %0,
+ i8 9,
+ i32 %1)
+
+ ret <vscale x 64 x i8> %a
+}
+
+define <vscale x 64 x i8> @intrinsic_vrsub_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv64i8_nxv64i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 64 x i8> @llvm.riscv.vrsub.mask.nxv64i8.i8(
+ <vscale x 64 x i8> %0,
+ <vscale x 64 x i8> %1,
+ i8 9,
+ <vscale x 64 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 64 x i8> %a
+}
+
+define <vscale x 1 x i16> @intrinsic_vrsub_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_vi_nxv1i16_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 1 x i16> @llvm.riscv.vrsub.nxv1i16.i16(
+ <vscale x 1 x i16> %0,
+ i16 9,
+ i32 %1)
+
+ ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 1 x i16> @intrinsic_vrsub_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv1i16_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 1 x i16> @llvm.riscv.vrsub.mask.nxv1i16.i16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16> %1,
+ i16 9,
+ <vscale x 1 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 2 x i16> @intrinsic_vrsub_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_vi_nxv2i16_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 2 x i16> @llvm.riscv.vrsub.nxv2i16.i16(
+ <vscale x 2 x i16> %0,
+ i16 9,
+ i32 %1)
+
+ ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 2 x i16> @intrinsic_vrsub_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv2i16_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 2 x i16> @llvm.riscv.vrsub.mask.nxv2i16.i16(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16> %1,
+ i16 9,
+ <vscale x 2 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 4 x i16> @intrinsic_vrsub_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_vi_nxv4i16_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 4 x i16> @llvm.riscv.vrsub.nxv4i16.i16(
+ <vscale x 4 x i16> %0,
+ i16 9,
+ i32 %1)
+
+ ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 4 x i16> @intrinsic_vrsub_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv4i16_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vrsub.mask.nxv4i16.i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16> %1,
+ i16 9,
+ <vscale x 4 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 8 x i16> @intrinsic_vrsub_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_vi_nxv8i16_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 8 x i16> @llvm.riscv.vrsub.nxv8i16.i16(
+ <vscale x 8 x i16> %0,
+ i16 9,
+ i32 %1)
+
+ ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 8 x i16> @intrinsic_vrsub_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv8i16_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 8 x i16> @llvm.riscv.vrsub.mask.nxv8i16.i16(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16> %1,
+ i16 9,
+ <vscale x 8 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 16 x i16> @intrinsic_vrsub_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_vi_nxv16i16_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 16 x i16> @llvm.riscv.vrsub.nxv16i16.i16(
+ <vscale x 16 x i16> %0,
+ i16 9,
+ i32 %1)
+
+ ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 16 x i16> @intrinsic_vrsub_mask_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv16i16_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 16 x i16> @llvm.riscv.vrsub.mask.nxv16i16.i16(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16> %1,
+ i16 9,
+ <vscale x 16 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 32 x i16> @intrinsic_vrsub_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_vi_nxv32i16_nxv32i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 32 x i16> @llvm.riscv.vrsub.nxv32i16.i16(
+ <vscale x 32 x i16> %0,
+ i16 9,
+ i32 %1)
+
+ ret <vscale x 32 x i16> %a
+}
+
+define <vscale x 32 x i16> @intrinsic_vrsub_mask_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv32i16_nxv32i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 32 x i16> @llvm.riscv.vrsub.mask.nxv32i16.i16(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i16> %1,
+ i16 9,
+ <vscale x 32 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 32 x i16> %a
+}
+
+define <vscale x 1 x i32> @intrinsic_vrsub_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_vi_nxv1i32_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 1 x i32> @llvm.riscv.vrsub.nxv1i32.i32(
+ <vscale x 1 x i32> %0,
+ i32 9,
+ i32 %1)
+
+ ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 1 x i32> @intrinsic_vrsub_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv1i32_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 1 x i32> @llvm.riscv.vrsub.mask.nxv1i32.i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32> %1,
+ i32 9,
+ <vscale x 1 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 2 x i32> @intrinsic_vrsub_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_vi_nxv2i32_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 2 x i32> @llvm.riscv.vrsub.nxv2i32.i32(
+ <vscale x 2 x i32> %0,
+ i32 9,
+ i32 %1)
+
+ ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 2 x i32> @intrinsic_vrsub_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv2i32_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vrsub.mask.nxv2i32.i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32> %1,
+ i32 9,
+ <vscale x 2 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 4 x i32> @intrinsic_vrsub_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_vi_nxv4i32_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 4 x i32> @llvm.riscv.vrsub.nxv4i32.i32(
+ <vscale x 4 x i32> %0,
+ i32 9,
+ i32 %1)
+
+ ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 4 x i32> @intrinsic_vrsub_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv4i32_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 4 x i32> @llvm.riscv.vrsub.mask.nxv4i32.i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32> %1,
+ i32 9,
+ <vscale x 4 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 8 x i32> @intrinsic_vrsub_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_vi_nxv8i32_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 8 x i32> @llvm.riscv.vrsub.nxv8i32.i32(
+ <vscale x 8 x i32> %0,
+ i32 9,
+ i32 %1)
+
+ ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 8 x i32> @intrinsic_vrsub_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv8i32_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 8 x i32> @llvm.riscv.vrsub.mask.nxv8i32.i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32> %1,
+ i32 9,
+ <vscale x 8 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 16 x i32> @intrinsic_vrsub_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_vi_nxv16i32_nxv16i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 16 x i32> @llvm.riscv.vrsub.nxv16i32.i32(
+ <vscale x 16 x i32> %0,
+ i32 9,
+ i32 %1)
+
+ ret <vscale x 16 x i32> %a
+}
+
+define <vscale x 16 x i32> @intrinsic_vrsub_mask_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv16i32_nxv16i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 16 x i32> @llvm.riscv.vrsub.mask.nxv16i32.i32(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32> %1,
+ i32 9,
+ <vscale x 16 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 16 x i32> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vrsub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vrsub-rv64.ll
new file mode 100644
index 000000000000..8d3dbdffaaae
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vrsub-rv64.ll
@@ -0,0 +1,1497 @@
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i8> @llvm.riscv.vrsub.nxv1i8.i8(
+ <vscale x 1 x i8>,
+ i8,
+ i64);
+
+define <vscale x 1 x i8> @intrinsic_vrsub_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_vx_nxv1i8_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 1 x i8> @llvm.riscv.vrsub.nxv1i8.i8(
+ <vscale x 1 x i8> %0,
+ i8 %1,
+ i64 %2)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vrsub.mask.nxv1i8.i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ i8,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i8> @intrinsic_vrsub_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv1i8_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i8> @llvm.riscv.vrsub.mask.nxv1i8.i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ i8 %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vrsub.nxv2i8.i8(
+ <vscale x 2 x i8>,
+ i8,
+ i64);
+
+define <vscale x 2 x i8> @intrinsic_vrsub_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_vx_nxv2i8_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 2 x i8> @llvm.riscv.vrsub.nxv2i8.i8(
+ <vscale x 2 x i8> %0,
+ i8 %1,
+ i64 %2)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vrsub.mask.nxv2i8.i8(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>,
+ i8,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i8> @intrinsic_vrsub_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv2i8_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i8> @llvm.riscv.vrsub.mask.nxv2i8.i8(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8> %1,
+ i8 %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vrsub.nxv4i8.i8(
+ <vscale x 4 x i8>,
+ i8,
+ i64);
+
+define <vscale x 4 x i8> @intrinsic_vrsub_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_vx_nxv4i8_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 4 x i8> @llvm.riscv.vrsub.nxv4i8.i8(
+ <vscale x 4 x i8> %0,
+ i8 %1,
+ i64 %2)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vrsub.mask.nxv4i8.i8(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>,
+ i8,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i8> @intrinsic_vrsub_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv4i8_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i8> @llvm.riscv.vrsub.mask.nxv4i8.i8(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8> %1,
+ i8 %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vrsub.nxv8i8.i8(
+ <vscale x 8 x i8>,
+ i8,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vrsub_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_vx_nxv8i8_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vrsub.nxv8i8.i8(
+ <vscale x 8 x i8> %0,
+ i8 %1,
+ i64 %2)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vrsub.mask.nxv8i8.i8(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ i8,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vrsub_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv8i8_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vrsub.mask.nxv8i8.i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8> %1,
+ i8 %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vrsub.nxv16i8.i8(
+ <vscale x 16 x i8>,
+ i8,
+ i64);
+
+define <vscale x 16 x i8> @intrinsic_vrsub_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_vx_nxv16i8_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 16 x i8> @llvm.riscv.vrsub.nxv16i8.i8(
+ <vscale x 16 x i8> %0,
+ i8 %1,
+ i64 %2)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vrsub.mask.nxv16i8.i8(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>,
+ i8,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i8> @intrinsic_vrsub_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv16i8_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i8> @llvm.riscv.vrsub.mask.nxv16i8.i8(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8> %1,
+ i8 %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vrsub.nxv32i8.i8(
+ <vscale x 32 x i8>,
+ i8,
+ i64);
+
+define <vscale x 32 x i8> @intrinsic_vrsub_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_vx_nxv32i8_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 32 x i8> @llvm.riscv.vrsub.nxv32i8.i8(
+ <vscale x 32 x i8> %0,
+ i8 %1,
+ i64 %2)
+
+ ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vrsub.mask.nxv32i8.i8(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>,
+ i8,
+ <vscale x 32 x i1>,
+ i64);
+
+define <vscale x 32 x i8> @intrinsic_vrsub_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv32i8_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 32 x i8> @llvm.riscv.vrsub.mask.nxv32i8.i8(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8> %1,
+ i8 %2,
+ <vscale x 32 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vrsub.nxv64i8.i8(
+ <vscale x 64 x i8>,
+ i8,
+ i64);
+
+define <vscale x 64 x i8> @intrinsic_vrsub_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_vx_nxv64i8_nxv64i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 64 x i8> @llvm.riscv.vrsub.nxv64i8.i8(
+ <vscale x 64 x i8> %0,
+ i8 %1,
+ i64 %2)
+
+ ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vrsub.mask.nxv64i8.i8(
+ <vscale x 64 x i8>,
+ <vscale x 64 x i8>,
+ i8,
+ <vscale x 64 x i1>,
+ i64);
+
+define <vscale x 64 x i8> @intrinsic_vrsub_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv64i8_nxv64i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 64 x i8> @llvm.riscv.vrsub.mask.nxv64i8.i8(
+ <vscale x 64 x i8> %0,
+ <vscale x 64 x i8> %1,
+ i8 %2,
+ <vscale x 64 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vrsub.nxv1i16.i16(
+ <vscale x 1 x i16>,
+ i16,
+ i64);
+
+define <vscale x 1 x i16> @intrinsic_vrsub_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_vx_nxv1i16_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 1 x i16> @llvm.riscv.vrsub.nxv1i16.i16(
+ <vscale x 1 x i16> %0,
+ i16 %1,
+ i64 %2)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vrsub.mask.nxv1i16.i16(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>,
+ i16,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i16> @intrinsic_vrsub_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv1i16_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i16> @llvm.riscv.vrsub.mask.nxv1i16.i16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16> %1,
+ i16 %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vrsub.nxv2i16.i16(
+ <vscale x 2 x i16>,
+ i16,
+ i64);
+
+define <vscale x 2 x i16> @intrinsic_vrsub_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_vx_nxv2i16_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 2 x i16> @llvm.riscv.vrsub.nxv2i16.i16(
+ <vscale x 2 x i16> %0,
+ i16 %1,
+ i64 %2)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vrsub.mask.nxv2i16.i16(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>,
+ i16,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i16> @intrinsic_vrsub_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv2i16_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i16> @llvm.riscv.vrsub.mask.nxv2i16.i16(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16> %1,
+ i16 %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vrsub.nxv4i16.i16(
+ <vscale x 4 x i16>,
+ i16,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vrsub_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_vx_nxv4i16_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vrsub.nxv4i16.i16(
+ <vscale x 4 x i16> %0,
+ i16 %1,
+ i64 %2)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vrsub.mask.nxv4i16.i16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ i16,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vrsub_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv4i16_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vrsub.mask.nxv4i16.i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16> %1,
+ i16 %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vrsub.nxv8i16.i16(
+ <vscale x 8 x i16>,
+ i16,
+ i64);
+
+define <vscale x 8 x i16> @intrinsic_vrsub_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_vx_nxv8i16_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 8 x i16> @llvm.riscv.vrsub.nxv8i16.i16(
+ <vscale x 8 x i16> %0,
+ i16 %1,
+ i64 %2)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vrsub.mask.nxv8i16.i16(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>,
+ i16,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i16> @intrinsic_vrsub_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv8i16_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i16> @llvm.riscv.vrsub.mask.nxv8i16.i16(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16> %1,
+ i16 %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vrsub.nxv16i16.i16(
+ <vscale x 16 x i16>,
+ i16,
+ i64);
+
+define <vscale x 16 x i16> @intrinsic_vrsub_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_vx_nxv16i16_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 16 x i16> @llvm.riscv.vrsub.nxv16i16.i16(
+ <vscale x 16 x i16> %0,
+ i16 %1,
+ i64 %2)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vrsub.mask.nxv16i16.i16(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>,
+ i16,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i16> @intrinsic_vrsub_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv16i16_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i16> @llvm.riscv.vrsub.mask.nxv16i16.i16(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16> %1,
+ i16 %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vrsub.nxv32i16.i16(
+ <vscale x 32 x i16>,
+ i16,
+ i64);
+
+define <vscale x 32 x i16> @intrinsic_vrsub_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_vx_nxv32i16_nxv32i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 32 x i16> @llvm.riscv.vrsub.nxv32i16.i16(
+ <vscale x 32 x i16> %0,
+ i16 %1,
+ i64 %2)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vrsub.mask.nxv32i16.i16(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>,
+ i16,
+ <vscale x 32 x i1>,
+ i64);
+
+define <vscale x 32 x i16> @intrinsic_vrsub_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv32i16_nxv32i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 32 x i16> @llvm.riscv.vrsub.mask.nxv32i16.i16(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i16> %1,
+ i16 %2,
+ <vscale x 32 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vrsub.nxv1i32.i32(
+ <vscale x 1 x i32>,
+ i32,
+ i64);
+
+define <vscale x 1 x i32> @intrinsic_vrsub_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_vx_nxv1i32_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 1 x i32> @llvm.riscv.vrsub.nxv1i32.i32(
+ <vscale x 1 x i32> %0,
+ i32 %1,
+ i64 %2)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vrsub.mask.nxv1i32.i32(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>,
+ i32,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i32> @intrinsic_vrsub_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv1i32_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i32> @llvm.riscv.vrsub.mask.nxv1i32.i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32> %1,
+ i32 %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vrsub.nxv2i32.i32(
+ <vscale x 2 x i32>,
+ i32,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vrsub_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_vx_nxv2i32_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vrsub.nxv2i32.i32(
+ <vscale x 2 x i32> %0,
+ i32 %1,
+ i64 %2)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vrsub.mask.nxv2i32.i32(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ i32,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vrsub_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv2i32_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vrsub.mask.nxv2i32.i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32> %1,
+ i32 %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vrsub.nxv4i32.i32(
+ <vscale x 4 x i32>,
+ i32,
+ i64);
+
+define <vscale x 4 x i32> @intrinsic_vrsub_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_vx_nxv4i32_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 4 x i32> @llvm.riscv.vrsub.nxv4i32.i32(
+ <vscale x 4 x i32> %0,
+ i32 %1,
+ i64 %2)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vrsub.mask.nxv4i32.i32(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>,
+ i32,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i32> @intrinsic_vrsub_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv4i32_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i32> @llvm.riscv.vrsub.mask.nxv4i32.i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32> %1,
+ i32 %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vrsub.nxv8i32.i32(
+ <vscale x 8 x i32>,
+ i32,
+ i64);
+
+define <vscale x 8 x i32> @intrinsic_vrsub_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_vx_nxv8i32_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 8 x i32> @llvm.riscv.vrsub.nxv8i32.i32(
+ <vscale x 8 x i32> %0,
+ i32 %1,
+ i64 %2)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vrsub.mask.nxv8i32.i32(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>,
+ i32,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i32> @intrinsic_vrsub_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv8i32_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i32> @llvm.riscv.vrsub.mask.nxv8i32.i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32> %1,
+ i32 %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vrsub.nxv16i32.i32(
+ <vscale x 16 x i32>,
+ i32,
+ i64);
+
+define <vscale x 16 x i32> @intrinsic_vrsub_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_vx_nxv16i32_nxv16i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 16 x i32> @llvm.riscv.vrsub.nxv16i32.i32(
+ <vscale x 16 x i32> %0,
+ i32 %1,
+ i64 %2)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vrsub.mask.nxv16i32.i32(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>,
+ i32,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i32> @intrinsic_vrsub_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv16i32_nxv16i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i32> @llvm.riscv.vrsub.mask.nxv16i32.i32(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32> %1,
+ i32 %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vrsub.nxv1i64.i64(
+ <vscale x 1 x i64>,
+ i64,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vrsub_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_vx_nxv1i64_nxv1i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 1 x i64> @llvm.riscv.vrsub.nxv1i64.i64(
+ <vscale x 1 x i64> %0,
+ i64 %1,
+ i64 %2)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vrsub.mask.nxv1i64.i64(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ i64,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vrsub_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv1i64_nxv1i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i64> @llvm.riscv.vrsub.mask.nxv1i64.i64(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64> %1,
+ i64 %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vrsub.nxv2i64.i64(
+ <vscale x 2 x i64>,
+ i64,
+ i64);
+
+define <vscale x 2 x i64> @intrinsic_vrsub_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_vx_nxv2i64_nxv2i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 2 x i64> @llvm.riscv.vrsub.nxv2i64.i64(
+ <vscale x 2 x i64> %0,
+ i64 %1,
+ i64 %2)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vrsub.mask.nxv2i64.i64(
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>,
+ i64,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i64> @intrinsic_vrsub_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv2i64_nxv2i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i64> @llvm.riscv.vrsub.mask.nxv2i64.i64(
+ <vscale x 2 x i64> %0,
+ <vscale x 2 x i64> %1,
+ i64 %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vrsub.nxv4i64.i64(
+ <vscale x 4 x i64>,
+ i64,
+ i64);
+
+define <vscale x 4 x i64> @intrinsic_vrsub_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_vx_nxv4i64_nxv4i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 4 x i64> @llvm.riscv.vrsub.nxv4i64.i64(
+ <vscale x 4 x i64> %0,
+ i64 %1,
+ i64 %2)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vrsub.mask.nxv4i64.i64(
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>,
+ i64,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i64> @intrinsic_vrsub_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv4i64_nxv4i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i64> @llvm.riscv.vrsub.mask.nxv4i64.i64(
+ <vscale x 4 x i64> %0,
+ <vscale x 4 x i64> %1,
+ i64 %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vrsub.nxv8i64.i64(
+ <vscale x 8 x i64>,
+ i64,
+ i64);
+
+define <vscale x 8 x i64> @intrinsic_vrsub_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_vx_nxv8i64_nxv8i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 8 x i64> @llvm.riscv.vrsub.nxv8i64.i64(
+ <vscale x 8 x i64> %0,
+ i64 %1,
+ i64 %2)
+
+ ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vrsub.mask.nxv8i64.i64(
+ <vscale x 8 x i64>,
+ <vscale x 8 x i64>,
+ i64,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i64> @intrinsic_vrsub_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv8i64_nxv8i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i64> @llvm.riscv.vrsub.mask.nxv8i64.i64(
+ <vscale x 8 x i64> %0,
+ <vscale x 8 x i64> %1,
+ i64 %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i64> %a
+}
+
+define <vscale x 1 x i8> @intrinsic_vrsub_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_vi_nxv1i8_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 1 x i8> @llvm.riscv.vrsub.nxv1i8.i8(
+ <vscale x 1 x i8> %0,
+ i8 9,
+ i64 %1)
+
+ ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 1 x i8> @intrinsic_vrsub_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv1i8_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 1 x i8> @llvm.riscv.vrsub.mask.nxv1i8.i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ i8 9,
+ <vscale x 1 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_vrsub_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_vi_nxv2i8_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 2 x i8> @llvm.riscv.vrsub.nxv2i8.i8(
+ <vscale x 2 x i8> %0,
+ i8 9,
+ i64 %1)
+
+ ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_vrsub_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv2i8_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 2 x i8> @llvm.riscv.vrsub.mask.nxv2i8.i8(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8> %1,
+ i8 9,
+ <vscale x 2 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_vrsub_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_vi_nxv4i8_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 4 x i8> @llvm.riscv.vrsub.nxv4i8.i8(
+ <vscale x 4 x i8> %0,
+ i8 9,
+ i64 %1)
+
+ ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_vrsub_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv4i8_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 4 x i8> @llvm.riscv.vrsub.mask.nxv4i8.i8(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8> %1,
+ i8 9,
+ <vscale x 4 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vrsub_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_vi_nxv8i8_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 8 x i8> @llvm.riscv.vrsub.nxv8i8.i8(
+ <vscale x 8 x i8> %0,
+ i8 9,
+ i64 %1)
+
+ ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vrsub_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv8i8_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vrsub.mask.nxv8i8.i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8> %1,
+ i8 9,
+ <vscale x 8 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_vrsub_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_vi_nxv16i8_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 16 x i8> @llvm.riscv.vrsub.nxv16i8.i8(
+ <vscale x 16 x i8> %0,
+ i8 9,
+ i64 %1)
+
+ ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_vrsub_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv16i8_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 16 x i8> @llvm.riscv.vrsub.mask.nxv16i8.i8(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8> %1,
+ i8 9,
+ <vscale x 16 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 32 x i8> @intrinsic_vrsub_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_vi_nxv32i8_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 32 x i8> @llvm.riscv.vrsub.nxv32i8.i8(
+ <vscale x 32 x i8> %0,
+ i8 9,
+ i64 %1)
+
+ ret <vscale x 32 x i8> %a
+}
+
+define <vscale x 32 x i8> @intrinsic_vrsub_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv32i8_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 32 x i8> @llvm.riscv.vrsub.mask.nxv32i8.i8(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8> %1,
+ i8 9,
+ <vscale x 32 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 32 x i8> %a
+}
+
+define <vscale x 64 x i8> @intrinsic_vrsub_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_vi_nxv64i8_nxv64i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 64 x i8> @llvm.riscv.vrsub.nxv64i8.i8(
+ <vscale x 64 x i8> %0,
+ i8 9,
+ i64 %1)
+
+ ret <vscale x 64 x i8> %a
+}
+
+define <vscale x 64 x i8> @intrinsic_vrsub_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv64i8_nxv64i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 64 x i8> @llvm.riscv.vrsub.mask.nxv64i8.i8(
+ <vscale x 64 x i8> %0,
+ <vscale x 64 x i8> %1,
+ i8 9,
+ <vscale x 64 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 64 x i8> %a
+}
+
+define <vscale x 1 x i16> @intrinsic_vrsub_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_vi_nxv1i16_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 1 x i16> @llvm.riscv.vrsub.nxv1i16.i16(
+ <vscale x 1 x i16> %0,
+ i16 9,
+ i64 %1)
+
+ ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 1 x i16> @intrinsic_vrsub_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv1i16_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 1 x i16> @llvm.riscv.vrsub.mask.nxv1i16.i16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16> %1,
+ i16 9,
+ <vscale x 1 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 2 x i16> @intrinsic_vrsub_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_vi_nxv2i16_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 2 x i16> @llvm.riscv.vrsub.nxv2i16.i16(
+ <vscale x 2 x i16> %0,
+ i16 9,
+ i64 %1)
+
+ ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 2 x i16> @intrinsic_vrsub_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv2i16_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 2 x i16> @llvm.riscv.vrsub.mask.nxv2i16.i16(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16> %1,
+ i16 9,
+ <vscale x 2 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 4 x i16> @intrinsic_vrsub_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_vi_nxv4i16_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 4 x i16> @llvm.riscv.vrsub.nxv4i16.i16(
+ <vscale x 4 x i16> %0,
+ i16 9,
+ i64 %1)
+
+ ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 4 x i16> @intrinsic_vrsub_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv4i16_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vrsub.mask.nxv4i16.i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16> %1,
+ i16 9,
+ <vscale x 4 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 8 x i16> @intrinsic_vrsub_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_vi_nxv8i16_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 8 x i16> @llvm.riscv.vrsub.nxv8i16.i16(
+ <vscale x 8 x i16> %0,
+ i16 9,
+ i64 %1)
+
+ ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 8 x i16> @intrinsic_vrsub_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv8i16_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 8 x i16> @llvm.riscv.vrsub.mask.nxv8i16.i16(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16> %1,
+ i16 9,
+ <vscale x 8 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 16 x i16> @intrinsic_vrsub_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_vi_nxv16i16_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 16 x i16> @llvm.riscv.vrsub.nxv16i16.i16(
+ <vscale x 16 x i16> %0,
+ i16 9,
+ i64 %1)
+
+ ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 16 x i16> @intrinsic_vrsub_mask_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv16i16_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 16 x i16> @llvm.riscv.vrsub.mask.nxv16i16.i16(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16> %1,
+ i16 9,
+ <vscale x 16 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 32 x i16> @intrinsic_vrsub_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_vi_nxv32i16_nxv32i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 32 x i16> @llvm.riscv.vrsub.nxv32i16.i16(
+ <vscale x 32 x i16> %0,
+ i16 9,
+ i64 %1)
+
+ ret <vscale x 32 x i16> %a
+}
+
+define <vscale x 32 x i16> @intrinsic_vrsub_mask_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv32i16_nxv32i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 32 x i16> @llvm.riscv.vrsub.mask.nxv32i16.i16(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i16> %1,
+ i16 9,
+ <vscale x 32 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 32 x i16> %a
+}
+
+define <vscale x 1 x i32> @intrinsic_vrsub_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_vi_nxv1i32_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 1 x i32> @llvm.riscv.vrsub.nxv1i32.i32(
+ <vscale x 1 x i32> %0,
+ i32 9,
+ i64 %1)
+
+ ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 1 x i32> @intrinsic_vrsub_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv1i32_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 1 x i32> @llvm.riscv.vrsub.mask.nxv1i32.i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32> %1,
+ i32 9,
+ <vscale x 1 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 2 x i32> @intrinsic_vrsub_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_vi_nxv2i32_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 2 x i32> @llvm.riscv.vrsub.nxv2i32.i32(
+ <vscale x 2 x i32> %0,
+ i32 9,
+ i64 %1)
+
+ ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 2 x i32> @intrinsic_vrsub_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv2i32_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vrsub.mask.nxv2i32.i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32> %1,
+ i32 9,
+ <vscale x 2 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 4 x i32> @intrinsic_vrsub_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_vi_nxv4i32_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 4 x i32> @llvm.riscv.vrsub.nxv4i32.i32(
+ <vscale x 4 x i32> %0,
+ i32 9,
+ i64 %1)
+
+ ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 4 x i32> @intrinsic_vrsub_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv4i32_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 4 x i32> @llvm.riscv.vrsub.mask.nxv4i32.i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32> %1,
+ i32 9,
+ <vscale x 4 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 8 x i32> @intrinsic_vrsub_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_vi_nxv8i32_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 8 x i32> @llvm.riscv.vrsub.nxv8i32.i32(
+ <vscale x 8 x i32> %0,
+ i32 9,
+ i64 %1)
+
+ ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 8 x i32> @intrinsic_vrsub_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv8i32_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 8 x i32> @llvm.riscv.vrsub.mask.nxv8i32.i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32> %1,
+ i32 9,
+ <vscale x 8 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 16 x i32> @intrinsic_vrsub_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_vi_nxv16i32_nxv16i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 16 x i32> @llvm.riscv.vrsub.nxv16i32.i32(
+ <vscale x 16 x i32> %0,
+ i32 9,
+ i64 %1)
+
+ ret <vscale x 16 x i32> %a
+}
+
+define <vscale x 16 x i32> @intrinsic_vrsub_mask_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv16i32_nxv16i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 16 x i32> @llvm.riscv.vrsub.mask.nxv16i32.i32(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32> %1,
+ i32 9,
+ <vscale x 16 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 16 x i32> %a
+}
+
+define <vscale x 1 x i64> @intrinsic_vrsub_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_vi_nxv1i64_nxv1i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 1 x i64> @llvm.riscv.vrsub.nxv1i64.i64(
+ <vscale x 1 x i64> %0,
+ i64 9,
+ i64 %1)
+
+ ret <vscale x 1 x i64> %a
+}
+
+define <vscale x 1 x i64> @intrinsic_vrsub_mask_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv1i64_nxv1i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 1 x i64> @llvm.riscv.vrsub.mask.nxv1i64.i64(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64> %1,
+ i64 9,
+ <vscale x 1 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i64> %a
+}
+
+define <vscale x 2 x i64> @intrinsic_vrsub_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_vi_nxv2i64_nxv2i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 2 x i64> @llvm.riscv.vrsub.nxv2i64.i64(
+ <vscale x 2 x i64> %0,
+ i64 9,
+ i64 %1)
+
+ ret <vscale x 2 x i64> %a
+}
+
+define <vscale x 2 x i64> @intrinsic_vrsub_mask_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv2i64_nxv2i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 2 x i64> @llvm.riscv.vrsub.mask.nxv2i64.i64(
+ <vscale x 2 x i64> %0,
+ <vscale x 2 x i64> %1,
+ i64 9,
+ <vscale x 2 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i64> %a
+}
+
+define <vscale x 4 x i64> @intrinsic_vrsub_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_vi_nxv4i64_nxv4i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 4 x i64> @llvm.riscv.vrsub.nxv4i64.i64(
+ <vscale x 4 x i64> %0,
+ i64 9,
+ i64 %1)
+
+ ret <vscale x 4 x i64> %a
+}
+
+define <vscale x 4 x i64> @intrinsic_vrsub_mask_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv4i64_nxv4i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 4 x i64> @llvm.riscv.vrsub.mask.nxv4i64.i64(
+ <vscale x 4 x i64> %0,
+ <vscale x 4 x i64> %1,
+ i64 9,
+ <vscale x 4 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i64> %a
+}
+
+define <vscale x 8 x i64> @intrinsic_vrsub_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_vi_nxv8i64_nxv8i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 8 x i64> @llvm.riscv.vrsub.nxv8i64.i64(
+ <vscale x 8 x i64> %0,
+ i64 9,
+ i64 %1)
+
+ ret <vscale x 8 x i64> %a
+}
+
+define <vscale x 8 x i64> @intrinsic_vrsub_mask_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv8i64_nxv8i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 8 x i64> @llvm.riscv.vrsub.mask.nxv8i64.i64(
+ <vscale x 8 x i64> %0,
+ <vscale x 8 x i64> %1,
+ i64 9,
+ <vscale x 8 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i64> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsub-rv32.ll
new file mode 100644
index 000000000000..1d005ecbe46c
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vsub-rv32.ll
@@ -0,0 +1,1441 @@
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ i32);
+
+define <vscale x 1 x i8> @intrinsic_vsub_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_vv_nxv1i8_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i8> @intrinsic_vsub_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv1i8_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>,
+ i32);
+
+define <vscale x 2 x i8> @intrinsic_vsub_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_vv_nxv2i8_nxv2i8_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vsub.mask.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i8> @intrinsic_vsub_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv2i8_nxv2i8_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i8> @llvm.riscv.vsub.mask.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8> %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>,
+ i32);
+
+define <vscale x 4 x i8> @intrinsic_vsub_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_vv_nxv4i8_nxv4i8_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vsub.mask.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i8> @intrinsic_vsub_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv4i8_nxv4i8_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i8> @llvm.riscv.vsub.mask.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8> %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vsub_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_vv_nxv8i8_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vsub.mask.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vsub_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv8i8_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vsub.mask.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>,
+ i32);
+
+define <vscale x 16 x i8> @intrinsic_vsub_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_vv_nxv16i8_nxv16i8_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vsub.mask.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i8> @intrinsic_vsub_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv16i8_nxv16i8_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i8> @llvm.riscv.vsub.mask.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8> %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>,
+ i32);
+
+define <vscale x 32 x i8> @intrinsic_vsub_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_vv_nxv32i8_nxv32i8_nxv32i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vsub.mask.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i1>,
+ i32);
+
+define <vscale x 32 x i8> @intrinsic_vsub_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv32i8_nxv32i8_nxv32i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 32 x i8> @llvm.riscv.vsub.mask.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8> %1,
+ <vscale x 32 x i8> %2,
+ <vscale x 32 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
+ <vscale x 64 x i8>,
+ i32);
+
+define <vscale x 64 x i8> @intrinsic_vsub_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_vv_nxv64i8_nxv64i8_nxv64i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> %0,
+ <vscale x 64 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vsub.mask.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
+ <vscale x 64 x i8>,
+ <vscale x 64 x i8>,
+ <vscale x 64 x i1>,
+ i32);
+
+define <vscale x 64 x i8> @intrinsic_vsub_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv64i8_nxv64i8_nxv64i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 64 x i8> @llvm.riscv.vsub.mask.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> %0,
+ <vscale x 64 x i8> %1,
+ <vscale x 64 x i8> %2,
+ <vscale x 64 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>,
+ i32);
+
+define <vscale x 1 x i16> @intrinsic_vsub_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_vv_nxv1i16_nxv1i16_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vsub.mask.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i16> @intrinsic_vsub_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv1i16_nxv1i16_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i16> @llvm.riscv.vsub.mask.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16> %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>,
+ i32);
+
+define <vscale x 2 x i16> @intrinsic_vsub_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_vv_nxv2i16_nxv2i16_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vsub.mask.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i16> @intrinsic_vsub_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv2i16_nxv2i16_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i16> @llvm.riscv.vsub.mask.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16> %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vsub_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_vv_nxv4i16_nxv4i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vsub.mask.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vsub_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv4i16_nxv4i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vsub.mask.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>,
+ i32);
+
+define <vscale x 8 x i16> @intrinsic_vsub_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_vv_nxv8i16_nxv8i16_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vsub.mask.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i16> @intrinsic_vsub_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv8i16_nxv8i16_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i16> @llvm.riscv.vsub.mask.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16> %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>,
+ i32);
+
+define <vscale x 16 x i16> @intrinsic_vsub_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_vv_nxv16i16_nxv16i16_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vsub.mask.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i16> @intrinsic_vsub_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv16i16_nxv16i16_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i16> @llvm.riscv.vsub.mask.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16> %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>,
+ i32);
+
+define <vscale x 32 x i16> @intrinsic_vsub_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_vv_nxv32i16_nxv32i16_nxv32i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vsub.mask.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>,
+ <vscale x 32 x i1>,
+ i32);
+
+define <vscale x 32 x i16> @intrinsic_vsub_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv32i16_nxv32i16_nxv32i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 32 x i16> @llvm.riscv.vsub.mask.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i16> %1,
+ <vscale x 32 x i16> %2,
+ <vscale x 32 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>,
+ i32);
+
+define <vscale x 1 x i32> @intrinsic_vsub_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_vv_nxv1i32_nxv1i32_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vsub.mask.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i32> @intrinsic_vsub_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv1i32_nxv1i32_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i32> @llvm.riscv.vsub.mask.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32> %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vsub_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_vv_nxv2i32_nxv2i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vsub.mask.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vsub_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv2i32_nxv2i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vsub.mask.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>,
+ i32);
+
+define <vscale x 4 x i32> @intrinsic_vsub_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_vv_nxv4i32_nxv4i32_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vsub.mask.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i32> @intrinsic_vsub_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv4i32_nxv4i32_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i32> @llvm.riscv.vsub.mask.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32> %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>,
+ i32);
+
+define <vscale x 8 x i32> @intrinsic_vsub_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_vv_nxv8i32_nxv8i32_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vsub.mask.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i32> @intrinsic_vsub_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv8i32_nxv8i32_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i32> @llvm.riscv.vsub.mask.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32> %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>,
+ i32);
+
+define <vscale x 16 x i32> @intrinsic_vsub_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_vv_nxv16i32_nxv16i32_nxv16i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vsub.mask.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i32> @intrinsic_vsub_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv16i32_nxv16i32_nxv16i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i32> @llvm.riscv.vsub.mask.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32> %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.i8(
+ <vscale x 1 x i8>,
+ i8,
+ i32);
+
+define <vscale x 1 x i8> @intrinsic_vsub_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_vx_nxv1i8_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.i8(
+ <vscale x 1 x i8> %0,
+ i8 %1,
+ i32 %2)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ i8,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i8> @intrinsic_vsub_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv1i8_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ i8 %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.i8(
+ <vscale x 2 x i8>,
+ i8,
+ i32);
+
+define <vscale x 2 x i8> @intrinsic_vsub_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_vx_nxv2i8_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.i8(
+ <vscale x 2 x i8> %0,
+ i8 %1,
+ i32 %2)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vsub.mask.nxv2i8.i8(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>,
+ i8,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i8> @intrinsic_vsub_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv2i8_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i8> @llvm.riscv.vsub.mask.nxv2i8.i8(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8> %1,
+ i8 %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.i8(
+ <vscale x 4 x i8>,
+ i8,
+ i32);
+
+define <vscale x 4 x i8> @intrinsic_vsub_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_vx_nxv4i8_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.i8(
+ <vscale x 4 x i8> %0,
+ i8 %1,
+ i32 %2)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vsub.mask.nxv4i8.i8(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>,
+ i8,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i8> @intrinsic_vsub_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv4i8_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i8> @llvm.riscv.vsub.mask.nxv4i8.i8(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8> %1,
+ i8 %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.i8(
+ <vscale x 8 x i8>,
+ i8,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vsub_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_vx_nxv8i8_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.i8(
+ <vscale x 8 x i8> %0,
+ i8 %1,
+ i32 %2)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vsub.mask.nxv8i8.i8(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ i8,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vsub_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv8i8_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vsub.mask.nxv8i8.i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8> %1,
+ i8 %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.i8(
+ <vscale x 16 x i8>,
+ i8,
+ i32);
+
+define <vscale x 16 x i8> @intrinsic_vsub_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_vx_nxv16i8_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.i8(
+ <vscale x 16 x i8> %0,
+ i8 %1,
+ i32 %2)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vsub.mask.nxv16i8.i8(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>,
+ i8,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i8> @intrinsic_vsub_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv16i8_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i8> @llvm.riscv.vsub.mask.nxv16i8.i8(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8> %1,
+ i8 %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.i8(
+ <vscale x 32 x i8>,
+ i8,
+ i32);
+
+define <vscale x 32 x i8> @intrinsic_vsub_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_vx_nxv32i8_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.i8(
+ <vscale x 32 x i8> %0,
+ i8 %1,
+ i32 %2)
+
+ ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vsub.mask.nxv32i8.i8(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>,
+ i8,
+ <vscale x 32 x i1>,
+ i32);
+
+define <vscale x 32 x i8> @intrinsic_vsub_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv32i8_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 32 x i8> @llvm.riscv.vsub.mask.nxv32i8.i8(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8> %1,
+ i8 %2,
+ <vscale x 32 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.i8(
+ <vscale x 64 x i8>,
+ i8,
+ i32);
+
+define <vscale x 64 x i8> @intrinsic_vsub_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_vx_nxv64i8_nxv64i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.i8(
+ <vscale x 64 x i8> %0,
+ i8 %1,
+ i32 %2)
+
+ ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vsub.mask.nxv64i8.i8(
+ <vscale x 64 x i8>,
+ <vscale x 64 x i8>,
+ i8,
+ <vscale x 64 x i1>,
+ i32);
+
+define <vscale x 64 x i8> @intrinsic_vsub_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv64i8_nxv64i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 64 x i8> @llvm.riscv.vsub.mask.nxv64i8.i8(
+ <vscale x 64 x i8> %0,
+ <vscale x 64 x i8> %1,
+ i8 %2,
+ <vscale x 64 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.i16(
+ <vscale x 1 x i16>,
+ i16,
+ i32);
+
+define <vscale x 1 x i16> @intrinsic_vsub_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_vx_nxv1i16_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.i16(
+ <vscale x 1 x i16> %0,
+ i16 %1,
+ i32 %2)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vsub.mask.nxv1i16.i16(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>,
+ i16,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i16> @intrinsic_vsub_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv1i16_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i16> @llvm.riscv.vsub.mask.nxv1i16.i16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16> %1,
+ i16 %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.i16(
+ <vscale x 2 x i16>,
+ i16,
+ i32);
+
+define <vscale x 2 x i16> @intrinsic_vsub_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_vx_nxv2i16_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.i16(
+ <vscale x 2 x i16> %0,
+ i16 %1,
+ i32 %2)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vsub.mask.nxv2i16.i16(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>,
+ i16,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i16> @intrinsic_vsub_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv2i16_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i16> @llvm.riscv.vsub.mask.nxv2i16.i16(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16> %1,
+ i16 %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.i16(
+ <vscale x 4 x i16>,
+ i16,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vsub_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_vx_nxv4i16_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.i16(
+ <vscale x 4 x i16> %0,
+ i16 %1,
+ i32 %2)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vsub.mask.nxv4i16.i16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ i16,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vsub_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv4i16_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vsub.mask.nxv4i16.i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16> %1,
+ i16 %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.i16(
+ <vscale x 8 x i16>,
+ i16,
+ i32);
+
+define <vscale x 8 x i16> @intrinsic_vsub_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_vx_nxv8i16_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.i16(
+ <vscale x 8 x i16> %0,
+ i16 %1,
+ i32 %2)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vsub.mask.nxv8i16.i16(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>,
+ i16,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i16> @intrinsic_vsub_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv8i16_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i16> @llvm.riscv.vsub.mask.nxv8i16.i16(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16> %1,
+ i16 %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.i16(
+ <vscale x 16 x i16>,
+ i16,
+ i32);
+
+define <vscale x 16 x i16> @intrinsic_vsub_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_vx_nxv16i16_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.i16(
+ <vscale x 16 x i16> %0,
+ i16 %1,
+ i32 %2)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vsub.mask.nxv16i16.i16(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>,
+ i16,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i16> @intrinsic_vsub_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv16i16_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i16> @llvm.riscv.vsub.mask.nxv16i16.i16(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16> %1,
+ i16 %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.i16(
+ <vscale x 32 x i16>,
+ i16,
+ i32);
+
+define <vscale x 32 x i16> @intrinsic_vsub_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_vx_nxv32i16_nxv32i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.i16(
+ <vscale x 32 x i16> %0,
+ i16 %1,
+ i32 %2)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vsub.mask.nxv32i16.i16(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>,
+ i16,
+ <vscale x 32 x i1>,
+ i32);
+
+define <vscale x 32 x i16> @intrinsic_vsub_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv32i16_nxv32i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 32 x i16> @llvm.riscv.vsub.mask.nxv32i16.i16(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i16> %1,
+ i16 %2,
+ <vscale x 32 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.i32(
+ <vscale x 1 x i32>,
+ i32,
+ i32);
+
+define <vscale x 1 x i32> @intrinsic_vsub_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_vx_nxv1i32_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.i32(
+ <vscale x 1 x i32> %0,
+ i32 %1,
+ i32 %2)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vsub.mask.nxv1i32.i32(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>,
+ i32,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i32> @intrinsic_vsub_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv1i32_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i32> @llvm.riscv.vsub.mask.nxv1i32.i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32> %1,
+ i32 %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.i32(
+ <vscale x 2 x i32>,
+ i32,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vsub_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_vx_nxv2i32_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.i32(
+ <vscale x 2 x i32> %0,
+ i32 %1,
+ i32 %2)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vsub.mask.nxv2i32.i32(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ i32,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vsub_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv2i32_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vsub.mask.nxv2i32.i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32> %1,
+ i32 %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.i32(
+ <vscale x 4 x i32>,
+ i32,
+ i32);
+
+define <vscale x 4 x i32> @intrinsic_vsub_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_vx_nxv4i32_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.i32(
+ <vscale x 4 x i32> %0,
+ i32 %1,
+ i32 %2)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vsub.mask.nxv4i32.i32(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>,
+ i32,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i32> @intrinsic_vsub_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv4i32_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i32> @llvm.riscv.vsub.mask.nxv4i32.i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32> %1,
+ i32 %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.i32(
+ <vscale x 8 x i32>,
+ i32,
+ i32);
+
+define <vscale x 8 x i32> @intrinsic_vsub_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_vx_nxv8i32_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.i32(
+ <vscale x 8 x i32> %0,
+ i32 %1,
+ i32 %2)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vsub.mask.nxv8i32.i32(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>,
+ i32,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i32> @intrinsic_vsub_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv8i32_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i32> @llvm.riscv.vsub.mask.nxv8i32.i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32> %1,
+ i32 %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.i32(
+ <vscale x 16 x i32>,
+ i32,
+ i32);
+
+define <vscale x 16 x i32> @intrinsic_vsub_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_vx_nxv16i32_nxv16i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.i32(
+ <vscale x 16 x i32> %0,
+ i32 %1,
+ i32 %2)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vsub.mask.nxv16i32.i32(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>,
+ i32,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i32> @intrinsic_vsub_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv16i32_nxv16i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i32> @llvm.riscv.vsub.mask.nxv16i32.i32(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32> %1,
+ i32 %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i32> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsub-rv64.ll
new file mode 100644
index 000000000000..6e1a22e4aff0
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vsub-rv64.ll
@@ -0,0 +1,1761 @@
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ i64);
+
+define <vscale x 1 x i8> @intrinsic_vsub_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_vv_nxv1i8_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i8> @intrinsic_vsub_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv1i8_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>,
+ i64);
+
+define <vscale x 2 x i8> @intrinsic_vsub_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_vv_nxv2i8_nxv2i8_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vsub.mask.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i8> @intrinsic_vsub_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv2i8_nxv2i8_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i8> @llvm.riscv.vsub.mask.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8> %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>,
+ i64);
+
+define <vscale x 4 x i8> @intrinsic_vsub_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_vv_nxv4i8_nxv4i8_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vsub.mask.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i8> @intrinsic_vsub_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv4i8_nxv4i8_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i8> @llvm.riscv.vsub.mask.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8> %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vsub_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_vv_nxv8i8_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vsub.mask.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vsub_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv8i8_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vsub.mask.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>,
+ i64);
+
+define <vscale x 16 x i8> @intrinsic_vsub_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_vv_nxv16i8_nxv16i8_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vsub.mask.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i8> @intrinsic_vsub_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv16i8_nxv16i8_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i8> @llvm.riscv.vsub.mask.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8> %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>,
+ i64);
+
+define <vscale x 32 x i8> @intrinsic_vsub_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_vv_nxv32i8_nxv32i8_nxv32i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vsub.mask.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i1>,
+ i64);
+
+define <vscale x 32 x i8> @intrinsic_vsub_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv32i8_nxv32i8_nxv32i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 32 x i8> @llvm.riscv.vsub.mask.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8> %1,
+ <vscale x 32 x i8> %2,
+ <vscale x 32 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
+ <vscale x 64 x i8>,
+ i64);
+
+define <vscale x 64 x i8> @intrinsic_vsub_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_vv_nxv64i8_nxv64i8_nxv64i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> %0,
+ <vscale x 64 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vsub.mask.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
+ <vscale x 64 x i8>,
+ <vscale x 64 x i8>,
+ <vscale x 64 x i1>,
+ i64);
+
+define <vscale x 64 x i8> @intrinsic_vsub_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv64i8_nxv64i8_nxv64i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 64 x i8> @llvm.riscv.vsub.mask.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> %0,
+ <vscale x 64 x i8> %1,
+ <vscale x 64 x i8> %2,
+ <vscale x 64 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>,
+ i64);
+
+define <vscale x 1 x i16> @intrinsic_vsub_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_vv_nxv1i16_nxv1i16_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vsub.mask.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i16> @intrinsic_vsub_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv1i16_nxv1i16_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i16> @llvm.riscv.vsub.mask.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16> %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>,
+ i64);
+
+define <vscale x 2 x i16> @intrinsic_vsub_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_vv_nxv2i16_nxv2i16_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vsub.mask.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i16> @intrinsic_vsub_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv2i16_nxv2i16_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i16> @llvm.riscv.vsub.mask.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16> %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vsub_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_vv_nxv4i16_nxv4i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vsub.mask.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vsub_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv4i16_nxv4i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vsub.mask.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>,
+ i64);
+
+define <vscale x 8 x i16> @intrinsic_vsub_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_vv_nxv8i16_nxv8i16_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vsub.mask.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i16> @intrinsic_vsub_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv8i16_nxv8i16_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i16> @llvm.riscv.vsub.mask.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16> %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>,
+ i64);
+
+define <vscale x 16 x i16> @intrinsic_vsub_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_vv_nxv16i16_nxv16i16_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vsub.mask.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i16> @intrinsic_vsub_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv16i16_nxv16i16_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i16> @llvm.riscv.vsub.mask.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16> %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>,
+ i64);
+
+define <vscale x 32 x i16> @intrinsic_vsub_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_vv_nxv32i16_nxv32i16_nxv32i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vsub.mask.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>,
+ <vscale x 32 x i1>,
+ i64);
+
+define <vscale x 32 x i16> @intrinsic_vsub_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv32i16_nxv32i16_nxv32i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 32 x i16> @llvm.riscv.vsub.mask.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i16> %1,
+ <vscale x 32 x i16> %2,
+ <vscale x 32 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>,
+ i64);
+
+define <vscale x 1 x i32> @intrinsic_vsub_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_vv_nxv1i32_nxv1i32_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vsub.mask.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i32> @intrinsic_vsub_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv1i32_nxv1i32_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i32> @llvm.riscv.vsub.mask.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32> %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vsub_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_vv_nxv2i32_nxv2i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vsub.mask.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vsub_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv2i32_nxv2i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vsub.mask.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>,
+ i64);
+
+define <vscale x 4 x i32> @intrinsic_vsub_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_vv_nxv4i32_nxv4i32_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vsub.mask.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i32> @intrinsic_vsub_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv4i32_nxv4i32_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i32> @llvm.riscv.vsub.mask.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32> %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>,
+ i64);
+
+define <vscale x 8 x i32> @intrinsic_vsub_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_vv_nxv8i32_nxv8i32_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vsub.mask.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i32> @intrinsic_vsub_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv8i32_nxv8i32_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i32> @llvm.riscv.vsub.mask.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32> %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>,
+ i64);
+
+define <vscale x 16 x i32> @intrinsic_vsub_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_vv_nxv16i32_nxv16i32_nxv16i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vsub.mask.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i32> @intrinsic_vsub_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv16i32_nxv16i32_nxv16i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i32> @llvm.riscv.vsub.mask.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32> %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vsub_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_vv_nxv1i64_nxv1i64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vsub.mask.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vsub_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv1i64_nxv1i64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i64> @llvm.riscv.vsub.mask.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64> %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>,
+ i64);
+
+define <vscale x 2 x i64> @intrinsic_vsub_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_vv_nxv2i64_nxv2i64_nxv2i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> %0,
+ <vscale x 2 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vsub.mask.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i64> @intrinsic_vsub_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv2i64_nxv2i64_nxv2i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i64> @llvm.riscv.vsub.mask.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> %0,
+ <vscale x 2 x i64> %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>,
+ i64);
+
+define <vscale x 4 x i64> @intrinsic_vsub_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_vv_nxv4i64_nxv4i64_nxv4i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> %0,
+ <vscale x 4 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vsub.mask.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i64> @intrinsic_vsub_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv4i64_nxv4i64_nxv4i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i64> @llvm.riscv.vsub.mask.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> %0,
+ <vscale x 4 x i64> %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>,
+ <vscale x 8 x i64>,
+ i64);
+
+define <vscale x 8 x i64> @intrinsic_vsub_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_vv_nxv8i64_nxv8i64_nxv8i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> %0,
+ <vscale x 8 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vsub.mask.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i64> @intrinsic_vsub_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv8i64_nxv8i64_nxv8i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i64> @llvm.riscv.vsub.mask.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> %0,
+ <vscale x 8 x i64> %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.i8(
+ <vscale x 1 x i8>,
+ i8,
+ i64);
+
+define <vscale x 1 x i8> @intrinsic_vsub_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_vx_nxv1i8_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.i8(
+ <vscale x 1 x i8> %0,
+ i8 %1,
+ i64 %2)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ i8,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i8> @intrinsic_vsub_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv1i8_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ i8 %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.i8(
+ <vscale x 2 x i8>,
+ i8,
+ i64);
+
+define <vscale x 2 x i8> @intrinsic_vsub_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_vx_nxv2i8_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.i8(
+ <vscale x 2 x i8> %0,
+ i8 %1,
+ i64 %2)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vsub.mask.nxv2i8.i8(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>,
+ i8,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i8> @intrinsic_vsub_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv2i8_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i8> @llvm.riscv.vsub.mask.nxv2i8.i8(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8> %1,
+ i8 %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.i8(
+ <vscale x 4 x i8>,
+ i8,
+ i64);
+
+define <vscale x 4 x i8> @intrinsic_vsub_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_vx_nxv4i8_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.i8(
+ <vscale x 4 x i8> %0,
+ i8 %1,
+ i64 %2)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vsub.mask.nxv4i8.i8(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>,
+ i8,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i8> @intrinsic_vsub_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv4i8_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i8> @llvm.riscv.vsub.mask.nxv4i8.i8(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8> %1,
+ i8 %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.i8(
+ <vscale x 8 x i8>,
+ i8,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vsub_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_vx_nxv8i8_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.i8(
+ <vscale x 8 x i8> %0,
+ i8 %1,
+ i64 %2)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vsub.mask.nxv8i8.i8(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ i8,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vsub_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv8i8_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vsub.mask.nxv8i8.i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8> %1,
+ i8 %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.i8(
+ <vscale x 16 x i8>,
+ i8,
+ i64);
+
+define <vscale x 16 x i8> @intrinsic_vsub_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_vx_nxv16i8_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.i8(
+ <vscale x 16 x i8> %0,
+ i8 %1,
+ i64 %2)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vsub.mask.nxv16i8.i8(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>,
+ i8,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i8> @intrinsic_vsub_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv16i8_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i8> @llvm.riscv.vsub.mask.nxv16i8.i8(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8> %1,
+ i8 %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.i8(
+ <vscale x 32 x i8>,
+ i8,
+ i64);
+
+define <vscale x 32 x i8> @intrinsic_vsub_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_vx_nxv32i8_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.i8(
+ <vscale x 32 x i8> %0,
+ i8 %1,
+ i64 %2)
+
+ ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vsub.mask.nxv32i8.i8(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>,
+ i8,
+ <vscale x 32 x i1>,
+ i64);
+
+define <vscale x 32 x i8> @intrinsic_vsub_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv32i8_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 32 x i8> @llvm.riscv.vsub.mask.nxv32i8.i8(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8> %1,
+ i8 %2,
+ <vscale x 32 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.i8(
+ <vscale x 64 x i8>,
+ i8,
+ i64);
+
+define <vscale x 64 x i8> @intrinsic_vsub_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_vx_nxv64i8_nxv64i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.i8(
+ <vscale x 64 x i8> %0,
+ i8 %1,
+ i64 %2)
+
+ ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vsub.mask.nxv64i8.i8(
+ <vscale x 64 x i8>,
+ <vscale x 64 x i8>,
+ i8,
+ <vscale x 64 x i1>,
+ i64);
+
+define <vscale x 64 x i8> @intrinsic_vsub_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv64i8_nxv64i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 64 x i8> @llvm.riscv.vsub.mask.nxv64i8.i8(
+ <vscale x 64 x i8> %0,
+ <vscale x 64 x i8> %1,
+ i8 %2,
+ <vscale x 64 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.i16(
+ <vscale x 1 x i16>,
+ i16,
+ i64);
+
+define <vscale x 1 x i16> @intrinsic_vsub_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_vx_nxv1i16_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.i16(
+ <vscale x 1 x i16> %0,
+ i16 %1,
+ i64 %2)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vsub.mask.nxv1i16.i16(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>,
+ i16,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i16> @intrinsic_vsub_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv1i16_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i16> @llvm.riscv.vsub.mask.nxv1i16.i16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16> %1,
+ i16 %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.i16(
+ <vscale x 2 x i16>,
+ i16,
+ i64);
+
+define <vscale x 2 x i16> @intrinsic_vsub_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_vx_nxv2i16_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.i16(
+ <vscale x 2 x i16> %0,
+ i16 %1,
+ i64 %2)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vsub.mask.nxv2i16.i16(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>,
+ i16,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i16> @intrinsic_vsub_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv2i16_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i16> @llvm.riscv.vsub.mask.nxv2i16.i16(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16> %1,
+ i16 %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.i16(
+ <vscale x 4 x i16>,
+ i16,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vsub_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_vx_nxv4i16_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.i16(
+ <vscale x 4 x i16> %0,
+ i16 %1,
+ i64 %2)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vsub.mask.nxv4i16.i16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ i16,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vsub_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv4i16_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vsub.mask.nxv4i16.i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16> %1,
+ i16 %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.i16(
+ <vscale x 8 x i16>,
+ i16,
+ i64);
+
+define <vscale x 8 x i16> @intrinsic_vsub_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_vx_nxv8i16_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.i16(
+ <vscale x 8 x i16> %0,
+ i16 %1,
+ i64 %2)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vsub.mask.nxv8i16.i16(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>,
+ i16,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i16> @intrinsic_vsub_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv8i16_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i16> @llvm.riscv.vsub.mask.nxv8i16.i16(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16> %1,
+ i16 %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.i16(
+ <vscale x 16 x i16>,
+ i16,
+ i64);
+
+define <vscale x 16 x i16> @intrinsic_vsub_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_vx_nxv16i16_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.i16(
+ <vscale x 16 x i16> %0,
+ i16 %1,
+ i64 %2)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vsub.mask.nxv16i16.i16(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>,
+ i16,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i16> @intrinsic_vsub_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv16i16_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i16> @llvm.riscv.vsub.mask.nxv16i16.i16(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16> %1,
+ i16 %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.i16(
+ <vscale x 32 x i16>,
+ i16,
+ i64);
+
+define <vscale x 32 x i16> @intrinsic_vsub_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_vx_nxv32i16_nxv32i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.i16(
+ <vscale x 32 x i16> %0,
+ i16 %1,
+ i64 %2)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vsub.mask.nxv32i16.i16(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>,
+ i16,
+ <vscale x 32 x i1>,
+ i64);
+
+define <vscale x 32 x i16> @intrinsic_vsub_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv32i16_nxv32i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 32 x i16> @llvm.riscv.vsub.mask.nxv32i16.i16(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i16> %1,
+ i16 %2,
+ <vscale x 32 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.i32(
+ <vscale x 1 x i32>,
+ i32,
+ i64);
+
+define <vscale x 1 x i32> @intrinsic_vsub_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_vx_nxv1i32_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.i32(
+ <vscale x 1 x i32> %0,
+ i32 %1,
+ i64 %2)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vsub.mask.nxv1i32.i32(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>,
+ i32,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i32> @intrinsic_vsub_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv1i32_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i32> @llvm.riscv.vsub.mask.nxv1i32.i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32> %1,
+ i32 %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.i32(
+ <vscale x 2 x i32>,
+ i32,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vsub_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_vx_nxv2i32_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.i32(
+ <vscale x 2 x i32> %0,
+ i32 %1,
+ i64 %2)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vsub.mask.nxv2i32.i32(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ i32,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vsub_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv2i32_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vsub.mask.nxv2i32.i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32> %1,
+ i32 %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.i32(
+ <vscale x 4 x i32>,
+ i32,
+ i64);
+
+define <vscale x 4 x i32> @intrinsic_vsub_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_vx_nxv4i32_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.i32(
+ <vscale x 4 x i32> %0,
+ i32 %1,
+ i64 %2)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vsub.mask.nxv4i32.i32(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>,
+ i32,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i32> @intrinsic_vsub_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv4i32_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i32> @llvm.riscv.vsub.mask.nxv4i32.i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32> %1,
+ i32 %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.i32(
+ <vscale x 8 x i32>,
+ i32,
+ i64);
+
+define <vscale x 8 x i32> @intrinsic_vsub_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_vx_nxv8i32_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.i32(
+ <vscale x 8 x i32> %0,
+ i32 %1,
+ i64 %2)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vsub.mask.nxv8i32.i32(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>,
+ i32,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i32> @intrinsic_vsub_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv8i32_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i32> @llvm.riscv.vsub.mask.nxv8i32.i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32> %1,
+ i32 %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.i32(
+ <vscale x 16 x i32>,
+ i32,
+ i64);
+
+define <vscale x 16 x i32> @intrinsic_vsub_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_vx_nxv16i32_nxv16i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.i32(
+ <vscale x 16 x i32> %0,
+ i32 %1,
+ i64 %2)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vsub.mask.nxv16i32.i32(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>,
+ i32,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i32> @intrinsic_vsub_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv16i32_nxv16i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i32> @llvm.riscv.vsub.mask.nxv16i32.i32(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32> %1,
+ i32 %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.i64(
+ <vscale x 1 x i64>,
+ i64,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vsub_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_vx_nxv1i64_nxv1i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.i64(
+ <vscale x 1 x i64> %0,
+ i64 %1,
+ i64 %2)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vsub.mask.nxv1i64.i64(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ i64,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vsub_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv1i64_nxv1i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i64> @llvm.riscv.vsub.mask.nxv1i64.i64(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64> %1,
+ i64 %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.i64(
+ <vscale x 2 x i64>,
+ i64,
+ i64);
+
+define <vscale x 2 x i64> @intrinsic_vsub_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_vx_nxv2i64_nxv2i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.i64(
+ <vscale x 2 x i64> %0,
+ i64 %1,
+ i64 %2)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vsub.mask.nxv2i64.i64(
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>,
+ i64,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i64> @intrinsic_vsub_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv2i64_nxv2i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i64> @llvm.riscv.vsub.mask.nxv2i64.i64(
+ <vscale x 2 x i64> %0,
+ <vscale x 2 x i64> %1,
+ i64 %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.i64(
+ <vscale x 4 x i64>,
+ i64,
+ i64);
+
+define <vscale x 4 x i64> @intrinsic_vsub_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_vx_nxv4i64_nxv4i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.i64(
+ <vscale x 4 x i64> %0,
+ i64 %1,
+ i64 %2)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vsub.mask.nxv4i64.i64(
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>,
+ i64,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i64> @intrinsic_vsub_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv4i64_nxv4i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i64> @llvm.riscv.vsub.mask.nxv4i64.i64(
+ <vscale x 4 x i64> %0,
+ <vscale x 4 x i64> %1,
+ i64 %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.i64(
+ <vscale x 8 x i64>,
+ i64,
+ i64);
+
+define <vscale x 8 x i64> @intrinsic_vsub_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_vx_nxv8i64_nxv8i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.i64(
+ <vscale x 8 x i64> %0,
+ i64 %1,
+ i64 %2)
+
+ ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vsub.mask.nxv8i64.i64(
+ <vscale x 8 x i64>,
+ <vscale x 8 x i64>,
+ i64,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i64> @intrinsic_vsub_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv8i64_nxv8i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i64> @llvm.riscv.vsub.mask.nxv8i64.i64(
+ <vscale x 8 x i64> %0,
+ <vscale x 8 x i64> %1,
+ i64 %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i64> %a
+}