[llvm] abb5a98 - [RISCV] Support mask policy for RVV IR intrinsics.
Zakk Chen via llvm-commits
llvm-commits at lists.llvm.org
Tue Mar 22 01:28:13 PDT 2022
Author: Zakk Chen
Date: 2022-03-22T01:19:16-07:00
New Revision: abb5a985e96acda93901fabf0f5be5b8a4422374
URL: https://github.com/llvm/llvm-project/commit/abb5a985e96acda93901fabf0f5be5b8a4422374
DIFF: https://github.com/llvm/llvm-project/commit/abb5a985e96acda93901fabf0f5be5b8a4422374.diff
LOG: [RISCV] Support mask policy for RVV IR intrinsics.
Add the UsesMaskPolicy flag to indicate that an operation's result
would be affected by the mask policy (e.g. masked operations).
It means RISCVInsertVSETVLI should decide the mask policy according
to the mask policy operand or the passthru operand.
If UsesMaskPolicy is false (e.g. unmasked, store, and reduction operations),
the mask policy can be either mask undisturbed or mask agnostic.
Currently, RISCVInsertVSETVLI defaults UsesMaskPolicy operations to
MA and all other operations to MU, so the existing mask policy is not
changed for unmasked operations.
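For reference, the policy operand packs both bits (TAIL_AGNOSTIC = 1,
MASK_AGNOSTIC = 2), so an immediate of 3 requests TAMA. A minimal sketch
mirroring the added tests (the declaration is the real intrinsic; the call
and the expected vsetvli are taken from masked-tama.ll):

  declare <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8(
    <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>,
    <vscale x 1 x i1>, iXLen, iXLen)

  ; policy = 3 (TAIL_AGNOSTIC | MASK_AGNOSTIC) yields
  ;   vsetvli zero, a0, e8, mf8, ta, ma
  %a = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8(
    <vscale x 1 x i8> undef, <vscale x 1 x i8> %0, <vscale x 1 x i8> %1,
    <vscale x 1 x i1> %2, iXLen %3, iXLen 3)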
Add masked-tama, masked-tamu, masked-tuma and masked-tumu test cases.
I didn't add tests for all operations because most implementations
share the same pseudo multiclass. Some cases may be duplicated across
tests (e.g. masked vmacc with tumu appears in both vmacc-rv32.ll and
masked-tumu.ll). I think having separate tests just for the policy
makes the testing clearer.
Reviewed By: craig.topper
Differential Revision: https://reviews.llvm.org/D120226
Added:
llvm/test/CodeGen/RISCV/rvv/masked-tama.ll
llvm/test/CodeGen/RISCV/rvv/masked-tamu.ll
llvm/test/CodeGen/RISCV/rvv/masked-tuma.ll
llvm/test/CodeGen/RISCV/rvv/masked-tumu.ll
Modified:
llvm/include/llvm/IR/IntrinsicsRISCV.td
llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h
llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
llvm/lib/Target/RISCV/RISCVInstrFormats.td
llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
Removed:
llvm/test/CodeGen/RISCV/rvv/vadd-policy.ll
################################################################################
diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
index 652ae976e85d7..9c8a320d11023 100644
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -219,7 +219,7 @@ let TargetPrefix = "riscv" in {
let VLOperand = 2;
}
// For unit stride load with mask
- // Input: (maskedoff, pointer, mask, vl, ta)
+ // Input: (maskedoff, pointer, mask, vl, policy)
class RISCVUSLoadMask
: Intrinsic<[llvm_anyvector_ty ],
[LLVMMatchType<0>,
@@ -231,7 +231,7 @@ let TargetPrefix = "riscv" in {
let VLOperand = 3;
}
// For unit stride fault-only-first load with mask
- // Input: (maskedoff, pointer, mask, vl, ta)
+ // Input: (maskedoff, pointer, mask, vl, policy)
// Output: (data, vl)
// NOTE: We model this with default memory properties since we model writing
// VL as a side effect. IntrReadMem, IntrHasSideEffects does not work.
@@ -255,7 +255,7 @@ let TargetPrefix = "riscv" in {
let VLOperand = 3;
}
// For strided load with mask
- // Input: (maskedoff, pointer, stride, mask, vl, ta)
+ // Input: (maskedoff, pointer, stride, mask, vl, policy)
class RISCVSLoadMask
: Intrinsic<[llvm_anyvector_ty ],
[LLVMMatchType<0>,
@@ -277,7 +277,7 @@ let TargetPrefix = "riscv" in {
let VLOperand = 3;
}
// For indexed load with mask
- // Input: (maskedoff, pointer, index, mask, vl, ta)
+ // Input: (maskedoff, pointer, index, mask, vl, policy)
class RISCVILoadMask
: Intrinsic<[llvm_anyvector_ty ],
[LLVMMatchType<0>,
@@ -358,7 +358,7 @@ let TargetPrefix = "riscv" in {
let VLOperand = 2;
}
// For destination vector type is the same as first source vector (with mask).
- // Input: (vector_in, mask, vl, ta)
+ // Input: (vector_in, vector_in, mask, vl, policy)
class RISCVUnaryAAMask
: Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, LLVMMatchType<0>,
@@ -367,7 +367,8 @@ let TargetPrefix = "riscv" in {
[ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic {
let VLOperand = 3;
}
- class RISCVUnaryAAMaskTU
+ // Input: (passthru, vector_in, vector_in, mask, vl)
+ class RISCVCompress
: Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, LLVMMatchType<0>,
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
@@ -392,7 +393,7 @@ let TargetPrefix = "riscv" in {
let VLOperand = 3;
}
// For destination vector type is the same as first and second source vector.
- // Input: (vector_in, vector_in, int_vector_in, vl, ta)
+ // Input: (vector_in, vector_in, int_vector_in, vl, policy)
class RISCVRGatherVVMask
: Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, LLVMMatchType<0>, LLVMVectorOfBitcastsToInt<0>,
@@ -411,7 +412,7 @@ let TargetPrefix = "riscv" in {
let VLOperand = 3;
}
// For destination vector type is the same as first and second source vector.
- // Input: (vector_in, vector_in, int16_vector_in, vl, ta)
+ // Input: (vector_in, vector_in, int16_vector_in, vl, policy)
class RISCVRGatherEI16VVMask
: Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, LLVMMatchType<0>,
@@ -426,14 +427,14 @@ let TargetPrefix = "riscv" in {
// Input: (passthru, vector_in, xlen_in, vl)
class RISCVGatherVXNoMask
: Intrinsic<[llvm_anyvector_ty],
- [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty,
+ [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty,
LLVMMatchType<1>],
[IntrNoMem]>, RISCVVIntrinsic {
let VLOperand = 3;
}
// For destination vector type is the same as first source vector (with mask).
// Second operand is XLen.
- // Input: (maskedoff, vector_in, xlen_in, mask, vl, ta)
+ // Input: (maskedoff, vector_in, xlen_in, mask, vl, policy)
class RISCVGatherVXMask
: Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty,
@@ -453,7 +454,7 @@ let TargetPrefix = "riscv" in {
let VLOperand = 3;
}
// For destination vector type is the same as first source vector (with mask).
- // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, ta)
+ // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy)
class RISCVBinaryAAXMask
: Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
@@ -475,7 +476,7 @@ let TargetPrefix = "riscv" in {
}
// For destination vector type is the same as first source vector (with mask).
// The second source operand must match the destination type or be an XLen scalar.
- // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, ta)
+ // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy)
class RISCVBinaryAAShiftMask
: Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
@@ -495,7 +496,7 @@ let TargetPrefix = "riscv" in {
let VLOperand = 3;
}
// For destination vector type is NOT the same as first source vector (with mask).
- // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, ta)
+ // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy)
class RISCVBinaryABXMask
: Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
@@ -517,7 +518,7 @@ let TargetPrefix = "riscv" in {
}
// For destination vector type is NOT the same as first source vector (with mask).
// The second source operand must match the destination type or be an XLen scalar.
- // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, ta)
+ // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy)
class RISCVBinaryABShiftMask
: Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
@@ -615,7 +616,7 @@ let TargetPrefix = "riscv" in {
}
// For Saturating binary operations with mask.
// The destination vector type is the same as first source vector.
- // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, ta)
+ // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy)
class RISCVSaturatingBinaryAAXMask
: Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
@@ -639,7 +640,7 @@ let TargetPrefix = "riscv" in {
// For Saturating binary operations with mask.
// The destination vector type is the same as first source vector.
// The second source operand matches the destination type or is an XLen scalar.
- // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, ta)
+ // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy)
class RISCVSaturatingBinaryAAShiftMask
: Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
@@ -662,7 +663,7 @@ let TargetPrefix = "riscv" in {
// For Saturating binary operations with mask.
// The destination vector type is NOT the same as first source vector (with mask).
// The second source operand matches the destination type or is an XLen scalar.
- // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, ta)
+ // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy)
class RISCVSaturatingBinaryABShiftMask
: Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
@@ -671,6 +672,7 @@ let TargetPrefix = "riscv" in {
[ImmArg<ArgIndex<5>>, IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic {
let VLOperand = 4;
}
+ // Input: (vector_in, vector_in, vector_in/scalar_in, vl)
class RISCVTernaryAAAXNoMask
: Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty,
@@ -678,6 +680,7 @@ let TargetPrefix = "riscv" in {
[IntrNoMem]>, RISCVVIntrinsic {
let VLOperand = 3;
}
+ // Input: (vector_in, vector_in, vector_in/scalar_in, mask, vl, policy)
class RISCVTernaryAAAXMask
: Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty,
@@ -686,6 +689,7 @@ let TargetPrefix = "riscv" in {
[ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
let VLOperand = 4;
}
+  // NoMask vector multiply-add operations: the first operand cannot be undef.
// Input: (vector_in, vector_in/scalar, vector_in, vl, policy)
class RISCVTernaryAAXANoMask
: Intrinsic<[llvm_anyvector_ty],
@@ -695,6 +699,7 @@ let TargetPrefix = "riscv" in {
let ScalarOperand = 1;
let VLOperand = 3;
}
+  // Masked vector multiply-add operations: the first operand cannot be undef.
  // Input: (vector_in, vector_in/scalar, vector_in, mask, vl, policy)
class RISCVTernaryAAXAMask
: Intrinsic<[llvm_anyvector_ty],
@@ -705,6 +710,7 @@ let TargetPrefix = "riscv" in {
let ScalarOperand = 1;
let VLOperand = 4;
}
+  // NoMask widening vector multiply-add operations: the first operand cannot be undef.
// Input: (vector_in, vector_in/scalar, vector_in, vl, policy)
class RISCVTernaryWideNoMask
: Intrinsic< [llvm_anyvector_ty],
@@ -714,6 +720,7 @@ let TargetPrefix = "riscv" in {
let ScalarOperand = 1;
let VLOperand = 3;
}
+  // Masked widening vector multiply-add operations: the first operand cannot be undef.
  // Input: (vector_in, vector_in/scalar, vector_in, mask, vl, policy)
class RISCVTernaryWideMask
: Intrinsic< [llvm_anyvector_ty],
@@ -772,7 +779,7 @@ let TargetPrefix = "riscv" in {
let VLOperand = 2;
}
// For destination vector type is NOT the same as source vector (with mask).
- // Input: (maskedoff, vector_in, mask, vl, ta)
+ // Input: (maskedoff, vector_in, mask, vl, policy)
class RISCVUnaryABMask
: Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, llvm_anyvector_ty,
@@ -824,7 +831,7 @@ let TargetPrefix = "riscv" in {
let VLOperand = 2;
}
// For Conversion unary operations with mask.
- // Input: (maskedoff, vector_in, mask, vl, ta)
+ // Input: (maskedoff, vector_in, mask, vl, policy)
class RISCVConversionMask
: Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, llvm_anyvector_ty,
@@ -844,7 +851,7 @@ let TargetPrefix = "riscv" in {
let VLOperand = 1;
}
// For unit stride segment load with mask
- // Input: (maskedoff, pointer, mask, vl, ta)
+ // Input: (maskedoff, pointer, mask, vl, policy)
class RISCVUSSegLoadMask<int nf>
: Intrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
!add(nf, -1))),
@@ -870,7 +877,7 @@ let TargetPrefix = "riscv" in {
let VLOperand = 1;
}
// For unit stride fault-only-first segment load with mask
- // Input: (maskedoff, pointer, mask, vl, ta)
+ // Input: (maskedoff, pointer, mask, vl, policy)
// Output: (data, vl)
// NOTE: We model this with default memory properties since we model writing
// VL as a side effect. IntrReadMem, IntrHasSideEffects does not work.
@@ -896,7 +903,7 @@ let TargetPrefix = "riscv" in {
let VLOperand = 2;
}
// For stride segment load with mask
- // Input: (maskedoff, pointer, offset, mask, vl, ta)
+ // Input: (maskedoff, pointer, offset, mask, vl, policy)
class RISCVSSegLoadMask<int nf>
: Intrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
!add(nf, -1))),
@@ -920,7 +927,7 @@ let TargetPrefix = "riscv" in {
let VLOperand = 2;
}
// For indexed segment load with mask
- // Input: (maskedoff, pointer, index, mask, vl, ta)
+ // Input: (maskedoff, pointer, index, mask, vl, policy)
class RISCVISegLoadMask<int nf>
: Intrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
!add(nf, -1))),
@@ -1360,7 +1367,7 @@ let TargetPrefix = "riscv" in {
defm vrgather_vx : RISCVRGatherVX;
defm vrgatherei16_vv : RISCVRGatherEI16VV;
- def "int_riscv_vcompress" : RISCVUnaryAAMaskTU;
+ def "int_riscv_vcompress" : RISCVCompress;
defm vaaddu : RISCVSaturatingBinaryAAX;
defm vaadd : RISCVSaturatingBinaryAAX;
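A side note on the rename above: vcompress is the only user of the old
RISCVUnaryAAMaskTU class, and its v0 operand selects elements rather than
predicating them, so it carries no policy operand. A sketch of the call
shape, following the naming convention of the tests below (value names are
illustrative):

  declare <vscale x 1 x i8> @llvm.riscv.vcompress.nxv1i8(
    <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i1>, iXLen)

  ; (passthru, vector_in, selector mask, vl) -- no trailing policy operand.
  %r = call <vscale x 1 x i8> @llvm.riscv.vcompress.nxv1i8(
    <vscale x 1 x i8> %passthru, <vscale x 1 x i8> %src,
    <vscale x 1 x i1> %sel, iXLen %vl)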
diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h
index 51afe1a5676c9..868e761eb3fdc 100644
--- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h
+++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h
@@ -88,6 +88,13 @@ enum {
// Pseudos.
IsRVVWideningReductionShift = HasVecPolicyOpShift + 1,
IsRVVWideningReductionMask = 1 << IsRVVWideningReductionShift,
+
+  // Does this instruction care about the mask policy? If it does not, the
+  // mask policy can be either agnostic or undisturbed. For example, the
+  // results of unmasked, store, and reduction operations are not affected
+  // by the mask policy, so the compiler is free to select either one.
+ UsesMaskPolicyShift = IsRVVWideningReductionShift + 1,
+ UsesMaskPolicyMask = 1 << UsesMaskPolicyShift,
};
// Match with the definitions in RISCVInstrFormatsV.td
@@ -110,8 +117,8 @@ enum VLMUL : uint8_t {
};
enum {
- TAIL_UNDISTURBED = 0,
TAIL_AGNOSTIC = 1,
+ MASK_AGNOSTIC = 2,
};
// Helper functions to read TSFlags.
@@ -156,6 +163,10 @@ static inline bool hasVecPolicyOp(uint64_t TSFlags) {
static inline bool isRVVWideningReduction(uint64_t TSFlags) {
return TSFlags & IsRVVWideningReductionMask;
}
+/// \returns true if the mask policy applies to the instruction.
+static inline bool UsesMaskPolicy(uint64_t TSFlags) {
+ return TSFlags & UsesMaskPolicyMask;
+}
// RISC-V Specific Machine Operand Flags
enum {
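With TAIL_AGNOSTIC = 1 and MASK_AGNOSTIC = 2, the four possible policy
immediates line up with the four new test files; a quick reference
(derived from the enum values and the test names):

  ; imm 0 -> tu, mu  (masked-tumu.ll)
  ; imm 1 -> ta, mu  (masked-tamu.ll)
  ; imm 2 -> tu, ma  (masked-tuma.ll)
  ; imm 3 -> ta, ma  (masked-tama.ll)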
diff --git a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
index f564af38f2d71..260d48c4d62e9 100644
--- a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
@@ -502,29 +502,46 @@ static VSETVLIInfo computeInfoForInstr(const MachineInstr &MI, uint64_t TSFlags,
unsigned NumOperands = MI.getNumExplicitOperands();
bool HasPolicy = RISCVII::hasVecPolicyOp(TSFlags);
- // Default to tail agnostic unless the destination is tied to a source.
- // Unless the source is undef. In that case the user would have some control
- // over the tail values. Some pseudo instructions force a tail agnostic policy
- // despite having a tied def.
+  // If the instruction has a policy argument, use the argument.
+  // If there is no policy argument, default to tail agnostic unless the
+  // destination is tied to a source and that source is not undef (if it is
+  // undef, the user would have some control over the policy values). Some
+  // pseudo instructions force a tail agnostic policy despite having a tied
+  // def.
bool ForceTailAgnostic = RISCVII::doesForceTailAgnostic(TSFlags);
bool TailAgnostic = true;
- // If the instruction has policy argument, use the argument.
+ bool UsesMaskPolicy = RISCVII::UsesMaskPolicy(TSFlags);
+  // FIXME: Could we look at the instructions above or below to choose a
+  // matching mask policy and reduce the number of vsetvli instructions? The
+  // default mask policy is agnostic when the instruction uses the mask
+  // policy, and undisturbed otherwise. Because most masked operations are
+  // mask undisturbed, we could possibly reduce the vsetvli toggling between
+  // masked and unmasked instruction sequences.
+ bool MaskAgnostic = UsesMaskPolicy;
+ unsigned UseOpIdx;
if (HasPolicy) {
const MachineOperand &Op = MI.getOperand(MI.getNumExplicitOperands() - 1);
- TailAgnostic = Op.getImm() & 0x1;
- }
-
- unsigned UseOpIdx;
- if (!(ForceTailAgnostic || (HasPolicy && TailAgnostic)) &&
- MI.isRegTiedToUseOperand(0, &UseOpIdx)) {
+ uint64_t Policy = Op.getImm();
+ assert(Policy <= (RISCVII::TAIL_AGNOSTIC | RISCVII::MASK_AGNOSTIC) &&
+ "Invalid Policy Value");
+    // Although in some cases a mismatch between the passthru/maskedoff and
+    // the policy value does not make sense (e.g. the tied operand is
+    // IMPLICIT_DEF with a non-TAMA policy, or the tied operand is not
+    // IMPLICIT_DEF with a TAMA policy), the user has set the policy value
+    // explicitly, so the compiler does not change it.
+ TailAgnostic = Policy & RISCVII::TAIL_AGNOSTIC;
+ MaskAgnostic = Policy & RISCVII::MASK_AGNOSTIC;
+ } else if (!ForceTailAgnostic && MI.isRegTiedToUseOperand(0, &UseOpIdx)) {
TailAgnostic = false;
+ if (UsesMaskPolicy)
+ MaskAgnostic = false;
// If the tied operand is an IMPLICIT_DEF we can keep TailAgnostic.
const MachineOperand &UseMO = MI.getOperand(UseOpIdx);
MachineInstr *UseMI = MRI->getVRegDef(UseMO.getReg());
if (UseMI) {
UseMI = elideCopies(UseMI, MRI);
- if (UseMI && UseMI->isImplicitDef())
+ if (UseMI && UseMI->isImplicitDef()) {
TailAgnostic = true;
+ if (UsesMaskPolicy)
+ MaskAgnostic = true;
+ }
}
}
@@ -559,8 +576,8 @@ static VSETVLIInfo computeInfoForInstr(const MachineInstr &MI, uint64_t TSFlags,
}
} else
InstrInfo.setAVLReg(RISCV::NoRegister);
- InstrInfo.setVTYPE(VLMul, SEW, /*TailAgnostic*/ TailAgnostic,
- /*MaskAgnostic*/ false, MaskRegOp, StoreOp, ScalarMovOp);
+ InstrInfo.setVTYPE(VLMul, SEW, TailAgnostic, MaskAgnostic, MaskRegOp, StoreOp,
+ ScalarMovOp);
return InstrInfo;
}
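To illustrate the two paths above: when a policy operand is present its
immediate wins, and only policy-less instructions fall back to inspecting
the tied passthru. A hedged sketch using the same vadd intrinsic as the
tests (the expected vtype strings are assumptions consistent with
masked-tama.ll and masked-tumu.ll):

  ; undef passthru, policy 3  ->  vsetvli zero, a0, e8, mf8, ta, ma
  %a = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8(
    <vscale x 1 x i8> undef, <vscale x 1 x i8> %x, <vscale x 1 x i8> %y,
    <vscale x 1 x i1> %m, iXLen %vl, iXLen 3)

  ; live passthru, policy 0   ->  vsetvli zero, a0, e8, mf8, tu, mu
  %b = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %pt, <vscale x 1 x i8> %x, <vscale x 1 x i8> %y,
    <vscale x 1 x i1> %m, iXLen %vl, iXLen 0)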
diff --git a/llvm/lib/Target/RISCV/RISCVInstrFormats.td b/llvm/lib/Target/RISCV/RISCVInstrFormats.td
index f99d0f56c4065..18b31f85bfdb4 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrFormats.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrFormats.td
@@ -196,7 +196,10 @@ class RVInst<dag outs, dag ins, string opcodestr, string argstr,
let TSFlags{16} = HasVecPolicyOp;
bit IsRVVWideningReduction = 0;
- let TSFlags{17} = IsRVVWideningReduction;
+ let TSFlags{17} = IsRVVWideningReduction;
+
+ bit UsesMaskPolicy = 0;
+ let TSFlags{18} = UsesMaskPolicy;
}
// Pseudo instructions
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index ed3c29f2ae751..406d30ac73360 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -685,6 +685,7 @@ class VPseudoUSLoadMask<VReg RetClass, int EEW, bit isFF> :
let HasSEWOp = 1;
let HasMergeOp = 1;
let HasVecPolicyOp = 1;
+ let UsesMaskPolicy = 1;
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
@@ -733,6 +734,7 @@ class VPseudoSLoadMask<VReg RetClass, int EEW>:
let HasSEWOp = 1;
let HasMergeOp = 1;
let HasVecPolicyOp = 1;
+ let UsesMaskPolicy = 1;
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
@@ -787,6 +789,7 @@ class VPseudoILoadMask<VReg RetClass, VReg IdxClass, int EEW, bits<3> LMUL,
let HasSEWOp = 1;
let HasMergeOp = 1;
let HasVecPolicyOp = 1;
+ let UsesMaskPolicy = 1;
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
@@ -912,6 +915,7 @@ class VPseudoNullaryMask<VReg RegClass>:
let HasVLOp = 1;
let HasSEWOp = 1;
let HasMergeOp = 1;
+ let UsesMaskPolicy = 1;
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
@@ -973,6 +977,7 @@ class VPseudoUnaryMask<VReg RetClass, VReg OpClass, string Constraint = ""> :
let HasVLOp = 1;
let HasSEWOp = 1;
let HasMergeOp = 1;
+ let UsesMaskPolicy = 1;
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
@@ -989,6 +994,7 @@ class VPseudoUnaryMaskTA<VReg RetClass, VReg OpClass, string Constraint = ""> :
let HasSEWOp = 1;
let HasMergeOp = 1;
let HasVecPolicyOp = 1;
+ let UsesMaskPolicy = 1;
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
@@ -1164,6 +1170,7 @@ class VPseudoBinaryMaskPolicy<VReg RetClass,
let HasSEWOp = 1;
let HasMergeOp = 1;
let HasVecPolicyOp = 1;
+ let UsesMaskPolicy = 1;
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
@@ -1184,6 +1191,9 @@ class VPseudoBinaryMOutMask<VReg RetClass,
let HasVLOp = 1;
let HasSEWOp = 1;
let HasMergeOp = 1;
+  // FIXME: In the current design we do not change the mask policy, so
+  // UsesMaskPolicy is false. We could fix this once the policy operand is
+  // added.
+ let UsesMaskPolicy = 0;
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
@@ -1206,6 +1216,7 @@ class VPseudoTiedBinaryMask<VReg RetClass,
let HasSEWOp = 1;
let HasMergeOp = 0; // Merge is also rs2.
let HasVecPolicyOp = 1;
+ let UsesMaskPolicy = 1;
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
@@ -1325,6 +1336,7 @@ class VPseudoUSSegLoadMask<VReg RetClass, int EEW, bits<4> NF, bit isFF>:
let HasSEWOp = 1;
let HasMergeOp = 1;
let HasVecPolicyOp = 1;
+ let UsesMaskPolicy = 1;
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
@@ -1358,6 +1370,7 @@ class VPseudoSSegLoadMask<VReg RetClass, int EEW, bits<4> NF>:
let HasSEWOp = 1;
let HasMergeOp = 1;
let HasVecPolicyOp = 1;
+ let UsesMaskPolicy = 1;
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
@@ -1397,6 +1410,7 @@ class VPseudoISegLoadMask<VReg RetClass, VReg IdxClass, int EEW, bits<3> LMUL,
let HasSEWOp = 1;
let HasMergeOp = 1;
let HasVecPolicyOp = 1;
+ let UsesMaskPolicy = 1;
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
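The VPseudoBinaryMOutMask opt-out above affects mask-producing compares
such as vmseq: they have no policy operand yet, so the inserter keeps
their mask policy undisturbed. A sketch (the declaration matches the
existing intrinsic; the "mu" expectation is an assumption based on
UsesMaskPolicy being left at 0):

  declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i8.nxv1i8(
    <vscale x 1 x i1>, <vscale x 1 x i8>, <vscale x 1 x i8>,
    <vscale x 1 x i1>, iXLen)

  ; No trailing policy operand; the mask policy stays mu for now.
  %m = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i8.nxv1i8(
    <vscale x 1 x i1> %maskedoff, <vscale x 1 x i8> %x,
    <vscale x 1 x i8> %y, <vscale x 1 x i1> %mask, iXLen %vl)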
diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-tama.ll b/llvm/test/CodeGen/RISCV/rvv/masked-tama.ll
new file mode 100644
index 0000000000000..c0284f152ffc1
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/masked-tama.ll
@@ -0,0 +1,1269 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh,+experimental-zvfh \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh,+experimental-zvfh \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s --check-prefixes=CHECK,RV64
+
+declare <vscale x 1 x i64> @llvm.riscv.vle.mask.nxv1i64(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vle_mask_nxv1i64_nxv1i64(<vscale x 1 x i64>* %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vle_mask_nxv1i64_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT: vle64.v v8, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vle.mask.nxv1i64(
+ <vscale x 1 x i64> undef,
+ <vscale x 1 x i64>* %0,
+ <vscale x 1 x i1> %1,
+ iXLen %2, iXLen 3)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare { <vscale x 1 x i64>, iXLen } @llvm.riscv.vleff.mask.nxv1i64(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vleff_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64>* %0, <vscale x 1 x i1> %1, iXLen %2, iXLen* %3) nounwind {
+; RV32-LABEL: intrinsic_vleff_mask_v_nxv1i64_nxv1i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; RV32-NEXT: vle64ff.v v8, (a0), v0.t
+; RV32-NEXT: csrr a0, vl
+; RV32-NEXT: sw a0, 0(a2)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vleff_mask_v_nxv1i64_nxv1i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT: vle64ff.v v8, (a0), v0.t
+; RV64-NEXT: csrr a0, vl
+; RV64-NEXT: sd a0, 0(a2)
+; RV64-NEXT: ret
+entry:
+ %a = call { <vscale x 1 x i64>, iXLen } @llvm.riscv.vleff.mask.nxv1i64(
+ <vscale x 1 x i64> undef,
+ <vscale x 1 x i64>* %0,
+ <vscale x 1 x i1> %1,
+ iXLen %2, iXLen 3)
+ %b = extractvalue { <vscale x 1 x i64>, iXLen } %a, 0
+ %c = extractvalue { <vscale x 1 x i64>, iXLen } %a, 1
+ store iXLen %c, iXLen* %3
+
+ ret <vscale x 1 x i64> %b
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vlse.mask.nxv1i64(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>*,
+ iXLen,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vlse_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64>* %0, iXLen %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i64_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; CHECK-NEXT: vlse64.v v8, (a0), a1, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vlse.mask.nxv1i64(
+ <vscale x 1 x i64> undef,
+ <vscale x 1 x i64>* %0,
+ iXLen %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3, iXLen 3)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1iXLen(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>*,
+ <vscale x 1 x iXLen>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1iXLen(<vscale x 1 x i8>* %0, <vscale x 1 x iXLen> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1iXLen(
+ <vscale x 1 x i8> undef,
+ <vscale x 1 x i8>* %0,
+ <vscale x 1 x iXLen> %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3, iXLen 3)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vadd_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i8_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> undef,
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3, iXLen 3)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.nxv1i8(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x i16> @intrinsic_vwadd_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv1i16_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: vwadd.vv v10, v8, v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.nxv1i8(
+ <vscale x 1 x i16> undef,
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3, iXLen 3)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vrsub.mask.nxv1i8.i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ i8,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vrsub_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv1i8_nxv1i8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: vrsub.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vrsub.mask.nxv1i8.i8(
+ <vscale x 1 x i8> undef,
+ <vscale x 1 x i8> %0,
+ i8 %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3, iXLen 3)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ i8,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vsub_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv1i8_nxv1i8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: vadd.vi v8, v8, 9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.i8(
+ <vscale x 1 x i8> undef,
+ <vscale x 1 x i8> %0,
+ i8 -9,
+ <vscale x 1 x i1> %1,
+ iXLen %2, iXLen 3)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i32(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vzext_mask_vf2_nxv1i64(<vscale x 1 x i32> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: vzext.vf2 v9, v8, v0.t
+; CHECK-NEXT: vmv.v.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i32(
+ <vscale x 1 x i64> undef,
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i1> %1,
+ iXLen %2, iXLen 3)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i16(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vzext_mask_vf4_nxv1i64(<vscale x 1 x i16> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vzext_mask_vf4_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: vzext.vf4 v9, v8, v0.t
+; CHECK-NEXT: vmv.v.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i16(
+ <vscale x 1 x i64> undef,
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i1> %1,
+ iXLen %2, iXLen 3)
+
+ ret <vscale x 1 x i64> %a
+}
+declare <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i8(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vzext_mask_vf8_nxv1i64(<vscale x 1 x i8> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vzext_mask_vf8_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: vzext.vf8 v9, v8, v0.t
+; CHECK-NEXT: vmv.v.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i8(
+ <vscale x 1 x i64> undef,
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i1> %1,
+ iXLen %2, iXLen 3)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vand_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vand_mask_vv_nxv1i8_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: vand.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> undef,
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3, iXLen 3)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vsll_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv1i8_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: vsll.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> undef,
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3, iXLen 3)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vnsra_mask_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv1i8_nxv1i16_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: vnsra.wv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.nxv1i8(
+ <vscale x 1 x i8> undef,
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3, iXLen 3)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vmin.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vmin_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv1i8_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: vmin.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vmin.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> undef,
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3, iXLen 3)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vdiv.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vdiv_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv1i8_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: vdiv.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vdiv.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> undef,
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3, iXLen 3)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.nxv1i8(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x i16> @intrinsic_vwmul_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv1i16_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: vwmul.vv v10, v8, v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.nxv1i8(
+ <vscale x 1 x i16> undef,
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3, iXLen 3)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vmacc_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv1i8_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: vmacc.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> undef,
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3, iXLen 3)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vwmacc.mask.nxv1i16.nxv1i8(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen);
+
+define <vscale x 1 x i16> @intrinsic_vwmacc_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv1i16_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: vwmacc.vv v10, v8, v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vwmacc.mask.nxv1i16.nxv1i8(
+ <vscale x 1 x i16> undef,
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3, iXLen 3)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vsadd_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv1i8_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: vsadd.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> undef,
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3, iXLen 3)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vaadd.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vaadd_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv1i8_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: vaadd.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vaadd.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> undef,
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3, iXLen 3)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: vsmul.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> undef,
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3, iXLen 3)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vssrl_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv1i8_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: vssrl.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> undef,
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3, iXLen 3)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: vnclip.wv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8(
+ <vscale x 1 x i8> undef,
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3, iXLen 3)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x half> @intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: vfadd.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16(
+ <vscale x 1 x half> undef,
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3, iXLen 3)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.nxv1f16(
+ <vscale x 1 x float>,
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x float> @intrinsic_vfwadd_mask_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv1f32_nxv1f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: vfwadd.vv v10, v8, v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.nxv1f16(
+ <vscale x 1 x float> undef,
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3, iXLen 3)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfmul.mask.nxv1f16.nxv1f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x half> @intrinsic_vfmul_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: vfmul.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfmul.mask.nxv1f16.nxv1f16(
+ <vscale x 1 x half> undef,
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3, iXLen 3)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfdiv.mask.nxv1f16.nxv1f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x half> @intrinsic_vfdiv_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: vfdiv.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfdiv.mask.nxv1f16.nxv1f16(
+ <vscale x 1 x half> undef,
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3, iXLen 3)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfrdiv.mask.nxv1f16.f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ half,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x half> @intrinsic_vfrdiv_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv1f16_nxv1f16_f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: vfrdiv.vf v8, v8, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfrdiv.mask.nxv1f16.f16(
+ <vscale x 1 x half> undef,
+ <vscale x 1 x half> %0,
+ half %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3, iXLen 3)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.nxv1f16(
+ <vscale x 1 x float>,
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x float> @intrinsic_vfwmul_mask_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv1f32_nxv1f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: vfwmul.vv v10, v8, v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.nxv1f16(
+ <vscale x 1 x float> undef,
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3, iXLen 3)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfmacc.mask.nxv1f16.nxv1f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen);
+
+define <vscale x 1 x half> @intrinsic_vfmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: vfmacc.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfmacc.mask.nxv1f16.nxv1f16(
+ <vscale x 1 x half> undef,
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3, iXLen 3)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1f16(
+ <vscale x 1 x float>,
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen);
+
+define <vscale x 1 x float> @intrinsic_vfwmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: vfwmacc.vv v10, v8, v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1f16(
+ <vscale x 1 x float> undef,
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3, iXLen 3);
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfsqrt.mask.nxv1f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x half> @intrinsic_vfsqrt_mask_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv1f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: vfsqrt.v v8, v8, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfsqrt.mask.nxv1f16(
+ <vscale x 1 x half> undef,
+ <vscale x 1 x half> %0,
+ <vscale x 1 x i1> %1,
+ iXLen %2, iXLen 3)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfrsqrt7.mask.nxv1f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x half> @intrinsic_vfrsqrt7_mask_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv1f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: vfrsqrt7.v v8, v8, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfrsqrt7.mask.nxv1f16(
+ <vscale x 1 x half> undef,
+ <vscale x 1 x half> %0,
+ <vscale x 1 x i1> %1,
+ iXLen %2, iXLen 3)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfrec7.mask.nxv1f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x half> @intrinsic_vfrec7_mask_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv1f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: vfrec7.v v8, v8, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfrec7.mask.nxv1f16(
+ <vscale x 1 x half> undef,
+ <vscale x 1 x half> %0,
+ <vscale x 1 x i1> %1,
+ iXLen %2, iXLen 3)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfmin.mask.nxv1f16.nxv1f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x half> @intrinsic_vfmin_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: vfmin.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfmin.mask.nxv1f16.nxv1f16(
+ <vscale x 1 x half> undef,
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3, iXLen 3)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfsgnj.mask.nxv1f16.nxv1f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x half> @intrinsic_vfsgnj_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: vfsgnj.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfsgnj.mask.nxv1f16.nxv1f16(
+ <vscale x 1 x half> undef,
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3, iXLen 3)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vfclass.mask.nxv1i16(
+ <vscale x 1 x i16>,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen);
+
+define <vscale x 1 x i16> @intrinsic_vfclass_mask_v_nxv1i16_nxv1f16(
+; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv1i16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: vfclass.v v8, v8, v0.t
+; CHECK-NEXT: ret
+ <vscale x 1 x half> %0,
+ <vscale x 1 x i1> %1,
+ iXLen %2) nounwind {
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vfclass.mask.nxv1i16(
+ <vscale x 1 x i16> undef,
+ <vscale x 1 x half> %0,
+ <vscale x 1 x i1> %1,
+ iXLen %2, iXLen 3)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i16.nxv1f16(
+ <vscale x 1 x i16>,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x i16> @intrinsic_vfcvt_mask_xu.f.v_nxv1i16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv1i16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: vfcvt.xu.f.v v8, v8, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i16.nxv1f16(
+ <vscale x 1 x i16> undef,
+ <vscale x 1 x half> %0,
+ <vscale x 1 x i1> %1,
+ iXLen %2, iXLen 3)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfcvt.f.x.v.mask.nxv1f16.nxv1i16(
+ <vscale x 1 x half>,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x half> @intrinsic_vfcvt_mask_f.x.v_nxv1f16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv1f16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfcvt.f.x.v.mask.nxv1f16.nxv1i16(
+ <vscale x 1 x half> undef,
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i1> %1,
+ iXLen %2, iXLen 3)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i32.nxv1f16(
+ <vscale x 1 x i32>,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x i32> @intrinsic_vfwcvt_mask_xu.f.v_nxv1i32_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv1i32_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: vfwcvt.xu.f.v v9, v8, v0.t
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i32.nxv1f16(
+ <vscale x 1 x i32> undef,
+ <vscale x 1 x half> %0,
+ <vscale x 1 x i1> %1,
+ iXLen %2, iXLen 3)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f16.nxv1i8(
+ <vscale x 1 x half>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x half> @intrinsic_vfwcvt_mask_f.x.v_nxv1f16_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv1f16_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: vfwcvt.f.x.v v9, v8, v0.t
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f16.nxv1i8(
+ <vscale x 1 x half> undef,
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i1> %1,
+ iXLen %2, iXLen 3)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1f16(
+ <vscale x 1 x float>,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x float> @intrinsic_vfwcvt_mask_f.f.v_nxv1f32_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv1f32_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: vfwcvt.f.f.v v9, v8, v0.t
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1f16(
+ <vscale x 1 x float> undef,
+ <vscale x 1 x half> %0,
+ <vscale x 1 x i1> %1,
+ iXLen %2, iXLen 3)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1f16(
+ <vscale x 1 x i8>,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vfncvt_mask_xu.f.w_nxv1i8_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv1i8_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: vfncvt.xu.f.w v9, v8, v0.t
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1f16(
+ <vscale x 1 x i8> undef,
+ <vscale x 1 x half> %0,
+ <vscale x 1 x i1> %1,
+ iXLen %2, iXLen 3)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfncvt.f.x.w.mask.nxv1f16.nxv1i32(
+ <vscale x 1 x half>,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x half> @intrinsic_vfncvt_mask_f.x.w_nxv1f16_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv1f16_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: vfncvt.f.x.w v9, v8, v0.t
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.x.w.mask.nxv1f16.nxv1i32(
+ <vscale x 1 x half> undef,
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i1> %1,
+ iXLen %2, iXLen 3)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfncvt.f.f.w.mask.nxv1f16.nxv1f32(
+ <vscale x 1 x half>,
+ <vscale x 1 x float>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x half> @intrinsic_vfncvt_mask_f.f.w_nxv1f16_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv1f16_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: vfncvt.f.f.w v9, v8, v0.t
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.f.w.mask.nxv1f16.nxv1f32(
+ <vscale x 1 x half> undef,
+ <vscale x 1 x float> %0,
+ <vscale x 1 x i1> %1,
+ iXLen %2, iXLen 3)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vslideup.mask.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ iXLen,
+ <vscale x 1 x i1>,
+ iXLen, iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vslideup_mask_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, iXLen %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: vslideup.vx v9, v8, a0, v0.t
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vslideup.mask.nxv1i8(
+ <vscale x 1 x i8> undef,
+ <vscale x 1 x i8> %0,
+ iXLen %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3, iXLen 3)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vslide1up.mask.nxv1i8.i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ i8,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vslide1up_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv1i8_nxv1i8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: vslide1up.vx v9, v8, a0, v0.t
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vslide1up.mask.nxv1i8.i8(
+ <vscale x 1 x i8> undef,
+ <vscale x 1 x i8> %0,
+ i8 %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3, iXLen 3)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfslide1up.mask.nxv1f16.f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ half,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x half> @intrinsic_vfslide1up_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv1f16_nxv1f16_f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: vfslide1up.vf v9, v8, fa0, v0.t
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfslide1up.mask.nxv1f16.f16(
+ <vscale x 1 x half> undef,
+ <vscale x 1 x half> %0,
+ half %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3, iXLen 3)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vrgather.vv.mask.nxv1i8.iXLen(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vrgather_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1i8_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: vrgather.vv v10, v8, v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.vv.mask.nxv1i8.iXLen(
+ <vscale x 1 x i8> undef,
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3, iXLen 3)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vrgatherei16_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: vrgatherei16.vv v10, v8, v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv1i8(
+ <vscale x 1 x i8> undef,
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i16> %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3, iXLen 3)
+
+ ret <vscale x 1 x i8> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-tamu.ll b/llvm/test/CodeGen/RISCV/rvv/masked-tamu.ll
new file mode 100644
index 0000000000000..396cd81898fbc
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/masked-tamu.ll
@@ -0,0 +1,1198 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh,+experimental-zvfh \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh,+experimental-zvfh \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s --check-prefixes=CHECK,RV64
+
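+; All masked calls in this file pass a policy immediate of 1 (tail agnostic,
+; mask undisturbed under the bit 0 = TA / bit 1 = MA encoding), so the
+; expected vsetvli configuration throughout is "ta, mu": masked-off elements
+; must keep the passthru value while tail elements may be clobbered.
+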
+declare <vscale x 1 x i64> @llvm.riscv.vle.mask.nxv1i64(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x i64> @intrinsic_vle_mask_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vle_mask_nxv1i64_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; CHECK-NEXT: vle64.v v8, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vle.mask.nxv1i64(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64>* %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare { <vscale x 1 x i64>, iXLen } @llvm.riscv.vleff.mask.nxv1i64(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x i64> @intrinsic_vleff_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+; RV32-LABEL: intrinsic_vleff_mask_v_nxv1i64_nxv1i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; RV32-NEXT: vle64ff.v v8, (a0), v0.t
+; RV32-NEXT: csrr a0, vl
+; RV32-NEXT: sw a0, 0(a2)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vleff_mask_v_nxv1i64_nxv1i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT: vle64ff.v v8, (a0), v0.t
+; RV64-NEXT: csrr a0, vl
+; RV64-NEXT: sd a0, 0(a2)
+; RV64-NEXT: ret
+entry:
+ %a = call { <vscale x 1 x i64>, iXLen } @llvm.riscv.vleff.mask.nxv1i64(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64>* %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3, iXLen 1)
+ %b = extractvalue { <vscale x 1 x i64>, iXLen } %a, 0
+ %c = extractvalue { <vscale x 1 x i64>, iXLen } %a, 1
+ store iXLen %c, iXLen* %4
+
+ ret <vscale x 1 x i64> %b
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vlse.mask.nxv1i64(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>*,
+ iXLen,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x i64> @intrinsic_vlse_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i64_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; CHECK-NEXT: vlse64.v v8, (a0), a1, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vlse.mask.nxv1i64(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64>* %1,
+ iXLen %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1iXLen(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>*,
+ <vscale x 1 x iXLen>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x i8> @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1iXLen(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x iXLen> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1iXLen(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8>* %1,
+ <vscale x 1 x iXLen> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen)
+define <vscale x 1 x i8> @intrinsic_vadd_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i8_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.nxv1i8(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x i16> @intrinsic_vwadd_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv1i16_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vwadd.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.nxv1i8(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vrsub.mask.nxv1i8.i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ i8,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x i8> @intrinsic_vrsub_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv1i8_nxv1i8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT: vrsub.vx v8, v9, a0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vrsub.mask.nxv1i8.i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ i8 %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ i8,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x i8> @intrinsic_vsub_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv1i8_nxv1i8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vadd.vi v8, v9, 9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ i8 -9,
+ <vscale x 1 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i32(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x i64> @intrinsic_vzext_mask_vf2_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vzext.vf2 v8, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i32(
+ <vscale x 1 x i64> %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %0,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i16(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x i64> @intrinsic_vzext_mask_vf4_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vzext_mask_vf4_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vzext.vf4 v8, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i16(
+ <vscale x 1 x i64> %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %0,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i8(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x i64> @intrinsic_vzext_mask_vf8_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vzext_mask_vf8_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vzext.vf8 v8, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i8(
+ <vscale x 1 x i64> %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %0,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x i8> @intrinsic_vand_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vand_mask_vv_nxv1i8_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vand.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x i8> @intrinsic_vsll_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv1i8_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vsll.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x i8> @intrinsic_vnsra_mask_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv1i8_nxv1i16_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vnsra.wv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i16> %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vmin.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x i8> @intrinsic_vmin_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv1i8_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vmin.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vmin.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vdiv.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x i8> @intrinsic_vdiv_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv1i8_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vdiv.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vdiv.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.nxv1i8(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x i16> @intrinsic_vwmul_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv1i16_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vwmul.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.nxv1i8(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen)
+define <vscale x 1 x i8> @intrinsic_vmacc_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv1i8_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vmacc.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vwmacc.mask.nxv1i16.nxv1i8(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen)
+define <vscale x 1 x i16> @intrinsic_vwmacc_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv1i16_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vwmacc.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vwmacc.mask.nxv1i16.nxv1i8(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x i8> @intrinsic_vsadd_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv1i8_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vsadd.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vaadd.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x i8> @intrinsic_vaadd_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv1i8_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vaadd.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vaadd.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x i8> @intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x i8> @intrinsic_vssrl_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv1i8_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vssrl.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x i8> @intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vnclip.wv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i16> %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x half> @intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vfadd.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 1 x half> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.nxv1f16(
+ <vscale x 1 x float>,
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x float> @intrinsic_vfwadd_mask_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv1f32_nxv1f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vfwadd.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.nxv1f16(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 1 x half> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfmul.mask.nxv1f16.nxv1f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x half> @intrinsic_vfmul_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vfmul.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfmul.mask.nxv1f16.nxv1f16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 1 x half> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfdiv.mask.nxv1f16.nxv1f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x half> @intrinsic_vfdiv_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vfdiv.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfdiv.mask.nxv1f16.nxv1f16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 1 x half> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfrdiv.mask.nxv1f16.f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ half,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x half> @intrinsic_vfrdiv_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv1f16_nxv1f16_f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vfrdiv.vf v8, v9, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfrdiv.mask.nxv1f16.f16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half> %1,
+ half %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.nxv1f16(
+ <vscale x 1 x float>,
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x float> @intrinsic_vfwmul_mask_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv1f32_nxv1f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vfwmul.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.nxv1f16(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 1 x half> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfmacc.mask.nxv1f16.nxv1f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen)
+define <vscale x 1 x half> @intrinsic_vfmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vfmacc.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfmacc.mask.nxv1f16.nxv1f16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 1 x half> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1f16(
+ <vscale x 1 x float>,
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen)
+define <vscale x 1 x float> @intrinsic_vfwmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vfwmacc.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1f16(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 1 x half> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfsqrt.mask.nxv1f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x half> @intrinsic_vfsqrt_mask_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv1f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vfsqrt.v v8, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfsqrt.mask.nxv1f16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfrsqrt7.mask.nxv1f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x half> @intrinsic_vfrsqrt7_mask_v_nxv1f16_nxv1f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv1f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vfrsqrt7.v v8, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfrsqrt7.mask.nxv1f16(
+ <vscale x 1 x half> %1,
+ <vscale x 1 x half> %2,
+ <vscale x 1 x i1> %0,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfrec7.mask.nxv1f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x half> @intrinsic_vfrec7_mask_v_nxv1f16_nxv1f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv1f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vfrec7.v v8, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfrec7.mask.nxv1f16(
+ <vscale x 1 x half> %1,
+ <vscale x 1 x half> %2,
+ <vscale x 1 x i1> %0,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfmin.mask.nxv1f16.nxv1f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x half> @intrinsic_vfmin_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vfmin.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfmin.mask.nxv1f16.nxv1f16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 1 x half> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfsgnj.mask.nxv1f16.nxv1f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x half> @intrinsic_vfsgnj_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vfsgnj.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfsgnj.mask.nxv1f16.nxv1f16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 1 x half> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vfclass.mask.nxv1i16(
+ <vscale x 1 x i16>,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen)
+define <vscale x 1 x i16> @intrinsic_vfclass_mask_v_nxv1i16_nxv1f16(<vscale x 1 x i16> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv1i16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vfclass.v v8, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vfclass.mask.nxv1i16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i16.nxv1f16(
+ <vscale x 1 x i16>,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x i16> @intrinsic_vfcvt_mask_xu.f.v_nxv1i16_nxv1f16(<vscale x 1 x i16> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv1i16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vfcvt.xu.f.v v8, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i16.nxv1f16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfcvt.f.x.v.mask.nxv1f16.nxv1i16(
+ <vscale x 1 x half>,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x half> @intrinsic_vfcvt_mask_f.x.v_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv1f16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vfcvt.f.x.v v8, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfcvt.f.x.v.mask.nxv1f16.nxv1i16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x i16> %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i32.nxv1f16(
+ <vscale x 1 x i32>,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x i32> @intrinsic_vfwcvt_mask_xu.f.v_nxv1i32_nxv1f16(<vscale x 1 x i32> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv1i32_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vfwcvt.xu.f.v v8, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i32.nxv1f16(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f16.nxv1i8(
+ <vscale x 1 x half>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x half> @intrinsic_vfwcvt_mask_f.x.v_nxv1f16_nxv1i8(<vscale x 1 x half> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv1f16_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vfwcvt.f.x.v v8, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f16.nxv1i8(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1f16(
+ <vscale x 1 x float>,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x float> @intrinsic_vfwcvt_mask_f.f.v_nxv1f32_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv1f32_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vfwcvt.f.f.v v8, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1f16(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1f16(
+ <vscale x 1 x i8>,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x i8> @intrinsic_vfncvt_mask_xu.f.w_nxv1i8_nxv1f16(<vscale x 1 x i8> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv1i8_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vfncvt.xu.f.w v8, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1f16(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfncvt.f.x.w.mask.nxv1f16.nxv1i32(
+ <vscale x 1 x half>,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x half> @intrinsic_vfncvt_mask_f.x.w_nxv1f16_nxv1i32(<vscale x 1 x half> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv1f16_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vfncvt.f.x.w v8, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.x.w.mask.nxv1f16.nxv1i32(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x i32> %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfncvt.f.f.w.mask.nxv1f16.nxv1f32(
+ <vscale x 1 x half>,
+ <vscale x 1 x float>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x half> @intrinsic_vfncvt_mask_f.f.w_nxv1f16_nxv1f32(<vscale x 1 x half> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv1f16_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vfncvt.f.f.w v8, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.f.w.mask.nxv1f16.nxv1f32(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x float> %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vslideup.mask.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ iXLen,
+ <vscale x 1 x i1>,
+ iXLen, iXLen)
+define <vscale x 1 x i8> @intrinsic_vslideup_mask_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT: vslideup.vx v8, v9, a0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vslideup.mask.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ iXLen %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vslide1up.mask.nxv1i8.i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ i8,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x i8> @intrinsic_vslide1up_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv1i8_nxv1i8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT: vslide1up.vx v8, v9, a0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vslide1up.mask.nxv1i8.i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ i8 %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfslide1up.mask.nxv1f16.f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ half,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x half> @intrinsic_vfslide1up_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv1f16_nxv1f16_f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vfslide1up.vf v8, v9, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfslide1up.mask.nxv1f16.f16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half> %1,
+ half %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vrgather.vv.mask.nxv1i8.iXLen(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x i8> @intrinsic_vrgather_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1i8_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vrgather.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.vv.mask.nxv1i8.iXLen(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x i8> @intrinsic_vrgatherei16_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vrgatherei16.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x i8> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-tuma.ll b/llvm/test/CodeGen/RISCV/rvv/masked-tuma.ll
new file mode 100644
index 0000000000000..c9ca9cd0ca2f0
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/masked-tuma.ll
@@ -0,0 +1,1198 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh,+experimental-zvfh \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh,+experimental-zvfh \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s --check-prefixes=CHECK,RV64
+
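+; All masked calls in this file pass a policy immediate of 2 (tail
+; undisturbed, mask agnostic), so the expected vsetvli configuration is
+; "tu, ma": tail elements must be preserved from the passthru operand while
+; masked-off elements may be overwritten.
+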
+declare <vscale x 1 x i64> @llvm.riscv.vle.mask.nxv1i64(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x i64> @intrinsic_vle_mask_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vle_mask_nxv1i64_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, ma
+; CHECK-NEXT: vle64.v v8, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vle.mask.nxv1i64(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64>* %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3, iXLen 2)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare { <vscale x 1 x i64>, iXLen } @llvm.riscv.vleff.mask.nxv1i64(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x i64> @intrinsic_vleff_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+; RV32-LABEL: intrinsic_vleff_mask_v_nxv1i64_nxv1i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a1, e64, m1, tu, ma
+; RV32-NEXT: vle64ff.v v8, (a0), v0.t
+; RV32-NEXT: csrr a0, vl
+; RV32-NEXT: sw a0, 0(a2)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vleff_mask_v_nxv1i64_nxv1i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, ma
+; RV64-NEXT: vle64ff.v v8, (a0), v0.t
+; RV64-NEXT: csrr a0, vl
+; RV64-NEXT: sd a0, 0(a2)
+; RV64-NEXT: ret
+entry:
+ %a = call { <vscale x 1 x i64>, iXLen } @llvm.riscv.vleff.mask.nxv1i64(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64>* %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3, iXLen 2)
+ %b = extractvalue { <vscale x 1 x i64>, iXLen } %a, 0
+ %c = extractvalue { <vscale x 1 x i64>, iXLen } %a, 1
+ store iXLen %c, iXLen* %4
+
+ ret <vscale x 1 x i64> %b
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vlse.mask.nxv1i64(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>*,
+ iXLen,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x i64> @intrinsic_vlse_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i64_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a2, e64, m1, tu, ma
+; CHECK-NEXT: vlse64.v v8, (a0), a1, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vlse.mask.nxv1i64(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64>* %1,
+ iXLen %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 2)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1iXLen(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>*,
+ <vscale x 1 x iXLen>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x i8> @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1iXLen(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x iXLen> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1iXLen(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8>* %1,
+ <vscale x 1 x iXLen> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 2)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen)
+define <vscale x 1 x i8> @intrinsic_vadd_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i8_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 2)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.nxv1i8(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x i16> @intrinsic_vwadd_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv1i16_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT: vwadd.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.nxv1i8(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 2)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vrsub.mask.nxv1i8.i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ i8,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x i8> @intrinsic_vrsub_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv1i8_nxv1i8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, ma
+; CHECK-NEXT: vrsub.vx v8, v9, a0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vrsub.mask.nxv1i8.i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ i8 %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 2)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ i8,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x i8> @intrinsic_vsub_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv1i8_nxv1i8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT: vadd.vi v8, v9, 9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ i8 -9,
+ <vscale x 1 x i1> %2,
+ iXLen %3, iXLen 2)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i32(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x i64> @intrinsic_vzext_mask_vf2_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma
+; CHECK-NEXT: vzext.vf2 v8, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i32(
+ <vscale x 1 x i64> %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %0,
+ iXLen %3, iXLen 2)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i16(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x i64> @intrinsic_vzext_mask_vf4_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vzext_mask_vf4_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma
+; CHECK-NEXT: vzext.vf4 v8, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i16(
+ <vscale x 1 x i64> %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %0,
+ iXLen %3, iXLen 2)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i8(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x i64> @intrinsic_vzext_mask_vf8_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vzext_mask_vf8_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma
+; CHECK-NEXT: vzext.vf8 v8, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i8(
+ <vscale x 1 x i64> %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %0,
+ iXLen %3, iXLen 2)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x i8> @intrinsic_vand_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vand_mask_vv_nxv1i8_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT: vand.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 2)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x i8> @intrinsic_vsll_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv1i8_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT: vsll.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 2)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x i8> @intrinsic_vnsra_mask_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv1i8_nxv1i16_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT: vnsra.wv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i16> %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 2)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vmin.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x i8> @intrinsic_vmin_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv1i8_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT: vmin.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vmin.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 2)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vdiv.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x i8> @intrinsic_vdiv_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv1i8_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT: vdiv.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vdiv.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 2)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.nxv1i8(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x i16> @intrinsic_vwmul_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv1i16_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT: vwmul.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.nxv1i8(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 2)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen)
+define <vscale x 1 x i8> @intrinsic_vmacc_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv1i8_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT: vmacc.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 2)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vwmacc.mask.nxv1i16.nxv1i8(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen)
+define <vscale x 1 x i16> @intrinsic_vwmacc_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv1i16_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT: vwmacc.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vwmacc.mask.nxv1i16.nxv1i8(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 2)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x i8> @intrinsic_vsadd_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv1i8_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT: vsadd.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 2)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vaadd.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x i8> @intrinsic_vaadd_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv1i8_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT: vaadd.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vaadd.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 2)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x i8> @intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 2)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x i8> @intrinsic_vssrl_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv1i8_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT: vssrl.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 2)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x i8> @intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT: vnclip.wv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i16> %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 2)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x half> @intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT: vfadd.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 1 x half> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 2)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.nxv1f16(
+ <vscale x 1 x float>,
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x float> @intrinsic_vfwadd_mask_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv1f32_nxv1f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT: vfwadd.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.nxv1f16(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 1 x half> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 2)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfmul.mask.nxv1f16.nxv1f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x half> @intrinsic_vfmul_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT: vfmul.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfmul.mask.nxv1f16.nxv1f16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 1 x half> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 2)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfdiv.mask.nxv1f16.nxv1f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x half> @intrinsic_vfdiv_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT: vfdiv.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfdiv.mask.nxv1f16.nxv1f16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 1 x half> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 2)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfrdiv.mask.nxv1f16.f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ half,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x half> @intrinsic_vfrdiv_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv1f16_nxv1f16_f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT: vfrdiv.vf v8, v9, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfrdiv.mask.nxv1f16.f16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half> %1,
+ half %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 2)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.nxv1f16(
+ <vscale x 1 x float>,
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x float> @intrinsic_vfwmul_mask_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv1f32_nxv1f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT: vfwmul.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.nxv1f16(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 1 x half> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 2)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfmacc.mask.nxv1f16.nxv1f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen)
+define <vscale x 1 x half> @intrinsic_vfmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT: vfmacc.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfmacc.mask.nxv1f16.nxv1f16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 1 x half> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 2)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1f16(
+ <vscale x 1 x float>,
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen)
+define <vscale x 1 x float> @intrinsic_vfwmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT: vfwmacc.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1f16(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 1 x half> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 2)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfsqrt.mask.nxv1f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x half> @intrinsic_vfsqrt_mask_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv1f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT: vfsqrt.v v8, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfsqrt.mask.nxv1f16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3, iXLen 2)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfrsqrt7.mask.nxv1f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x half> @intrinsic_vfrsqrt7_mask_v_nxv1f16_nxv1f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv1f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT: vfrsqrt7.v v8, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfrsqrt7.mask.nxv1f16(
+ <vscale x 1 x half> %1,
+ <vscale x 1 x half> %2,
+ <vscale x 1 x i1> %0,
+ iXLen %3, iXLen 2)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfrec7.mask.nxv1f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x half> @intrinsic_vfrec7_mask_v_nxv1f16_nxv1f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv1f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT: vfrec7.v v8, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfrec7.mask.nxv1f16(
+ <vscale x 1 x half> %1,
+ <vscale x 1 x half> %2,
+ <vscale x 1 x i1> %0,
+ iXLen %3, iXLen 2)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfmin.mask.nxv1f16.nxv1f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x half> @intrinsic_vfmin_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT: vfmin.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfmin.mask.nxv1f16.nxv1f16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 1 x half> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 2)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfsgnj.mask.nxv1f16.nxv1f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x half> @intrinsic_vfsgnj_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT: vfsgnj.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfsgnj.mask.nxv1f16.nxv1f16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 1 x half> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 2)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vfclass.mask.nxv1i16(
+ <vscale x 1 x i16>,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen)
+define <vscale x 1 x i16> @intrinsic_vfclass_mask_v_nxv1i16_nxv1f16(<vscale x 1 x i16> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv1i16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT: vfclass.v v8, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vfclass.mask.nxv1i16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3, iXLen 2)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i16.nxv1f16(
+ <vscale x 1 x i16>,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x i16> @intrinsic_vfcvt_mask_xu.f.v_nxv1i16_nxv1f16(<vscale x 1 x i16> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv1i16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT: vfcvt.xu.f.v v8, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i16.nxv1f16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3, iXLen 2)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfcvt.f.x.v.mask.nxv1f16.nxv1i16(
+ <vscale x 1 x half>,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x half> @intrinsic_vfcvt_mask_f.x.v_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv1f16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT: vfcvt.f.x.v v8, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfcvt.f.x.v.mask.nxv1f16.nxv1i16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x i16> %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3, iXLen 2)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i32.nxv1f16(
+ <vscale x 1 x i32>,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x i32> @intrinsic_vfwcvt_mask_xu.f.v_nxv1i32_nxv1f16(<vscale x 1 x i32> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv1i32_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT: vfwcvt.xu.f.v v8, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i32.nxv1f16(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3, iXLen 2)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f16.nxv1i8(
+ <vscale x 1 x half>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x half> @intrinsic_vfwcvt_mask_f.x.v_nxv1f16_nxv1i8(<vscale x 1 x half> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv1f16_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT: vfwcvt.f.x.v v8, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f16.nxv1i8(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3, iXLen 2)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1f16(
+ <vscale x 1 x float>,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x float> @intrinsic_vfwcvt_mask_f.f.v_nxv1f32_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv1f32_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT: vfwcvt.f.f.v v8, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1f16(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3, iXLen 2)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1f16(
+ <vscale x 1 x i8>,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x i8> @intrinsic_vfncvt_mask_xu.f.w_nxv1i8_nxv1f16(<vscale x 1 x i8> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv1i8_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT: vfncvt.xu.f.w v8, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1f16(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3, iXLen 2)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfncvt.f.x.w.mask.nxv1f16.nxv1i32(
+ <vscale x 1 x half>,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x half> @intrinsic_vfncvt_mask_f.x.w_nxv1f16_nxv1i32(<vscale x 1 x half> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv1f16_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT: vfncvt.f.x.w v8, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.x.w.mask.nxv1f16.nxv1i32(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x i32> %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3, iXLen 2)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfncvt.f.f.w.mask.nxv1f16.nxv1f32(
+ <vscale x 1 x half>,
+ <vscale x 1 x float>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x half> @intrinsic_vfncvt_mask_f.f.w_nxv1f16_nxv1f32(<vscale x 1 x half> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv1f16_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT: vfncvt.f.f.w v8, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.f.w.mask.nxv1f16.nxv1f32(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x float> %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3, iXLen 2)
+
+ ret <vscale x 1 x half> %a
+}
+
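+; vslideup and vslide1up take their scalar operand in a0, so vl moves to a1
+; in the vsetvli; vfslide1up's scalar arrives in fa0, leaving vl in a0.
+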
+declare <vscale x 1 x i8> @llvm.riscv.vslideup.mask.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ iXLen,
+ <vscale x 1 x i1>,
+ iXLen, iXLen)
+define <vscale x 1 x i8> @intrinsic_vslideup_mask_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, ma
+; CHECK-NEXT: vslideup.vx v8, v9, a0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vslideup.mask.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ iXLen %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 2)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vslide1up.mask.nxv1i8.i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ i8,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x i8> @intrinsic_vslide1up_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv1i8_nxv1i8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, ma
+; CHECK-NEXT: vslide1up.vx v8, v9, a0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vslide1up.mask.nxv1i8.i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ i8 %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 2)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfslide1up.mask.nxv1f16.f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ half,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x half> @intrinsic_vfslide1up_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv1f16_nxv1f16_f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT: vfslide1up.vf v8, v9, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfslide1up.mask.nxv1f16.f16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half> %1,
+ half %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 2)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vrgather.vv.mask.nxv1i8.iXLen(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x i8> @intrinsic_vrgather_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1i8_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT: vrgather.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.vv.mask.nxv1i8.iXLen(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 2)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x i8> @intrinsic_vrgatherei16_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT: vrgatherei16.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 2)
+
+ ret <vscale x 1 x i8> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-tumu.ll b/llvm/test/CodeGen/RISCV/rvv/masked-tumu.ll
new file mode 100644
index 0000000000000..bd0f24aa7862a
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/masked-tumu.ll
@@ -0,0 +1,1198 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh,+experimental-zvfh \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh,+experimental-zvfh \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s --check-prefixes=CHECK,RV64
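+
+; The trailing iXLen operand on each masked intrinsic is the policy value:
+; bit 0 selects tail agnostic, bit 1 selects mask agnostic. Every call in
+; this file passes 0, i.e. tail undisturbed / mask undisturbed, so the
+; expected vsetvli uses "tu, mu" (the companion masked-tuma.ll passes 2 and
+; expects "tu, ma").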
+
+declare <vscale x 1 x i64> @llvm.riscv.vle.mask.nxv1i64(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x i64> @intrinsic_vle_mask_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vle_mask_nxv1i64_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu
+; CHECK-NEXT: vle64.v v8, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vle.mask.nxv1i64(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64>* %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3, iXLen 0)
+
+ ret <vscale x 1 x i64> %a
+}
+
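+; vleff also produces the post-fault vl as the second member of its result
+; struct; the test stores that value through %4, which is why the checks
+; read vl back with csrr.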
+declare { <vscale x 1 x i64>, iXLen } @llvm.riscv.vleff.mask.nxv1i64(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x i64> @intrinsic_vleff_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+; RV32-LABEL: intrinsic_vleff_mask_v_nxv1i64_nxv1i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a1, e64, m1, tu, mu
+; RV32-NEXT: vle64ff.v v8, (a0), v0.t
+; RV32-NEXT: csrr a0, vl
+; RV32-NEXT: sw a0, 0(a2)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vleff_mask_v_nxv1i64_nxv1i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu
+; RV64-NEXT: vle64ff.v v8, (a0), v0.t
+; RV64-NEXT: csrr a0, vl
+; RV64-NEXT: sd a0, 0(a2)
+; RV64-NEXT: ret
+entry:
+ %a = call { <vscale x 1 x i64>, iXLen } @llvm.riscv.vleff.mask.nxv1i64(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64>* %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3, iXLen 0)
+ %b = extractvalue { <vscale x 1 x i64>, iXLen } %a, 0
+ %c = extractvalue { <vscale x 1 x i64>, iXLen } %a, 1
+ store iXLen %c, iXLen* %4
+
+ ret <vscale x 1 x i64> %b
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vlse.mask.nxv1i64(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>*,
+ iXLen,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x i64> @intrinsic_vlse_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i64_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a2, e64, m1, tu, mu
+; CHECK-NEXT: vlse64.v v8, (a0), a1, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vlse.mask.nxv1i64(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64>* %1,
+ iXLen %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 0)
+
+ ret <vscale x 1 x i64> %a
+}
+
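+; No CHECK lines for this test: sed rewrites the nxv1iXLen in the function
+; name differently for the RV32 and RV64 runs, so update_llc_test_checks.py
+; does not generate common assertions for it.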
+declare <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1iXLen(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>*,
+ <vscale x 1 x iXLen>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x i8> @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1iXLen(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x iXLen> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1iXLen(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8>* %1,
+ <vscale x 1 x iXLen> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 0)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen)
+define <vscale x 1 x i8> @intrinsic_vadd_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i8_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 0)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.nxv1i8(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x i16> @intrinsic_vwadd_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv1i16_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT: vwadd.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.nxv1i8(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 0)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vrsub.mask.nxv1i8.i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ i8,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x i8> @intrinsic_vrsub_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv1i8_nxv1i8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu
+; CHECK-NEXT: vrsub.vx v8, v9, a0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vrsub.mask.nxv1i8.i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ i8 %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 0)
+
+ ret <vscale x 1 x i8> %a
+}
+
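+; Subtracting the immediate -9 is expected to be folded into vadd.vi with +9.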
+declare <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ i8,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x i8> @intrinsic_vsub_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv1i8_nxv1i8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT: vadd.vi v8, v9, 9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ i8 -9,
+ <vscale x 1 x i1> %2,
+ iXLen %3, iXLen 0)
+
+ ret <vscale x 1 x i8> %a
+}
+
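+; The vzext tests take the mask as the first IR argument and reorder it into
+; the intrinsic's mask operand; the argument order differs from the tests
+; above.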
+declare <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i32(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x i64> @intrinsic_vzext_mask_vf2_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu
+; CHECK-NEXT: vzext.vf2 v8, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i32(
+ <vscale x 1 x i64> %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %0,
+ iXLen %3, iXLen 0)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i16(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x i64> @intrinsic_vzext_mask_vf4_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vzext_mask_vf4_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu
+; CHECK-NEXT: vzext.vf4 v8, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i16(
+ <vscale x 1 x i64> %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %0,
+ iXLen %3, iXLen 0)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i8(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x i64> @intrinsic_vzext_mask_vf8_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vzext_mask_vf8_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu
+; CHECK-NEXT: vzext.vf8 v8, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i8(
+ <vscale x 1 x i64> %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %0,
+ iXLen %3, iXLen 0)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x i8> @intrinsic_vand_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vand_mask_vv_nxv1i8_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT: vand.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 0)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x i8> @intrinsic_vsll_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv1i8_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT: vsll.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 0)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x i8> @intrinsic_vnsra_mask_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv1i8_nxv1i16_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT: vnsra.wv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i16> %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 0)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vmin.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x i8> @intrinsic_vmin_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv1i8_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT: vmin.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vmin.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 0)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vdiv.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x i8> @intrinsic_vdiv_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv1i8_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT: vdiv.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vdiv.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 0)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.nxv1i8(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x i16> @intrinsic_vwmul_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv1i16_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT: vwmul.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.nxv1i8(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 0)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen)
+define <vscale x 1 x i8> @intrinsic_vmacc_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv1i8_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT: vmacc.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 0)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vwmacc.mask.nxv1i16.nxv1i8(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen)
+define <vscale x 1 x i16> @intrinsic_vwmacc_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv1i16_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT: vwmacc.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vwmacc.mask.nxv1i16.nxv1i8(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 0)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x i8> @intrinsic_vsadd_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv1i8_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT: vsadd.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 0)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vaadd.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x i8> @intrinsic_vaadd_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv1i8_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT: vaadd.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vaadd.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 0)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x i8> @intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 0)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x i8> @intrinsic_vssrl_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv1i8_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT: vssrl.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 0)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x i8> @intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT: vnclip.wv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i16> %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 0)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x half> @intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT: vfadd.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 1 x half> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 0)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.nxv1f16(
+ <vscale x 1 x float>,
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x float> @intrinsic_vfwadd_mask_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv1f32_nxv1f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT: vfwadd.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.nxv1f16(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 1 x half> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 0)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfmul.mask.nxv1f16.nxv1f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x half> @intrinsic_vfmul_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT: vfmul.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfmul.mask.nxv1f16.nxv1f16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 1 x half> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 0)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfdiv.mask.nxv1f16.nxv1f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x half> @intrinsic_vfdiv_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT: vfdiv.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfdiv.mask.nxv1f16.nxv1f16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 1 x half> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 0)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfrdiv.mask.nxv1f16.f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ half,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x half> @intrinsic_vfrdiv_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv1f16_nxv1f16_f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT: vfrdiv.vf v8, v9, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfrdiv.mask.nxv1f16.f16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half> %1,
+ half %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 0)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.nxv1f16(
+ <vscale x 1 x float>,
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x float> @intrinsic_vfwmul_mask_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv1f32_nxv1f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT: vfwmul.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.nxv1f16(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 1 x half> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 0)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfmacc.mask.nxv1f16.nxv1f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen)
+define <vscale x 1 x half> @intrinsic_vfmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT: vfmacc.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfmacc.mask.nxv1f16.nxv1f16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 1 x half> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 0)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1f16(
+ <vscale x 1 x float>,
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen)
+define <vscale x 1 x float> @intrinsic_vfwmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT: vfwmacc.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1f16(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 1 x half> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 0)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfsqrt.mask.nxv1f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x half> @intrinsic_vfsqrt_mask_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv1f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT: vfsqrt.v v8, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfsqrt.mask.nxv1f16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3, iXLen 0)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfrsqrt7.mask.nxv1f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x half> @intrinsic_vfrsqrt7_mask_v_nxv1f16_nxv1f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv1f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT: vfrsqrt7.v v8, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfrsqrt7.mask.nxv1f16(
+ <vscale x 1 x half> %1,
+ <vscale x 1 x half> %2,
+ <vscale x 1 x i1> %0,
+ iXLen %3, iXLen 0)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfrec7.mask.nxv1f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x half> @intrinsic_vfrec7_mask_v_nxv1f16_nxv1f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv1f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT: vfrec7.v v8, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfrec7.mask.nxv1f16(
+ <vscale x 1 x half> %1,
+ <vscale x 1 x half> %2,
+ <vscale x 1 x i1> %0,
+ iXLen %3, iXLen 0)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfmin.mask.nxv1f16.nxv1f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x half> @intrinsic_vfmin_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT: vfmin.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfmin.mask.nxv1f16.nxv1f16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 1 x half> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 0)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfsgnj.mask.nxv1f16.nxv1f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x half> @intrinsic_vfsgnj_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT: vfsgnj.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfsgnj.mask.nxv1f16.nxv1f16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 1 x half> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 0)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vfclass.mask.nxv1i16(
+ <vscale x 1 x i16>,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen)
+define <vscale x 1 x i16> @intrinsic_vfclass_mask_v_nxv1i16_nxv1f16(<vscale x 1 x i16> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv1i16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT: vfclass.v v8, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vfclass.mask.nxv1i16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3, iXLen 0)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i16.nxv1f16(
+ <vscale x 1 x i16>,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x i16> @intrinsic_vfcvt_mask_xu.f.v_nxv1i16_nxv1f16(<vscale x 1 x i16> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv1i16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT: vfcvt.xu.f.v v8, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i16.nxv1f16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3, iXLen 0)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfcvt.f.x.v.mask.nxv1f16.nxv1i16(
+ <vscale x 1 x half>,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x half> @intrinsic_vfcvt_mask_f.x.v_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv1f16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT: vfcvt.f.x.v v8, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfcvt.f.x.v.mask.nxv1f16.nxv1i16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x i16> %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3, iXLen 0)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i32.nxv1f16(
+ <vscale x 1 x i32>,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x i32> @intrinsic_vfwcvt_mask_xu.f.v_nxv1i32_nxv1f16(<vscale x 1 x i32> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv1i32_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT: vfwcvt.xu.f.v v8, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i32.nxv1f16(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3, iXLen 0)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f16.nxv1i8(
+ <vscale x 1 x half>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x half> @intrinsic_vfwcvt_mask_f.x.v_nxv1f16_nxv1i8(<vscale x 1 x half> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv1f16_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT: vfwcvt.f.x.v v8, v9, v0.t
+; CHECK-NEXT: ret
+entry:
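+ ; Widening converts operate at the source element width: the i8 source gives
+ ; e8/mf8 in the vsetvli, and the f16 result is produced at 2*SEW.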
+ %a = call <vscale x 1 x half> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f16.nxv1i8(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3, iXLen 0)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1f16(
+ <vscale x 1 x float>,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x float> @intrinsic_vfwcvt_mask_f.f.v_nxv1f32_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv1f32_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT: vfwcvt.f.f.v v8, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1f16(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3, iXLen 0)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1f16(
+ <vscale x 1 x i8>,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x i8> @intrinsic_vfncvt_mask_xu.f.w_nxv1i8_nxv1f16(<vscale x 1 x i8> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv1i8_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT: vfncvt.xu.f.w v8, v9, v0.t
+; CHECK-NEXT: ret
+entry:
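+ ; Narrowing converts operate at the result element width: the i8 result gives
+ ; e8/mf8 in the vsetvli, and the f16 source is read at 2*SEW.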
+ %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1f16(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3, iXLen 0)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfncvt.f.x.w.mask.nxv1f16.nxv1i32(
+ <vscale x 1 x half>,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x half> @intrinsic_vfncvt_mask_f.x.w_nxv1f16_nxv1i32(<vscale x 1 x half> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv1f16_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT: vfncvt.f.x.w v8, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.x.w.mask.nxv1f16.nxv1i32(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x i32> %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3, iXLen 0)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfncvt.f.f.w.mask.nxv1f16.nxv1f32(
+ <vscale x 1 x half>,
+ <vscale x 1 x float>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x half> @intrinsic_vfncvt_mask_f.f.w_nxv1f16_nxv1f32(<vscale x 1 x half> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv1f16_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT: vfncvt.f.f.w v8, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.f.w.mask.nxv1f16.nxv1f32(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x float> %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3, iXLen 0)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vslideup.mask.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ iXLen,
+ <vscale x 1 x i1>,
+ iXLen, iXLen)
+define <vscale x 1 x i8> @intrinsic_vslideup_mask_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu
+; CHECK-NEXT: vslideup.vx v8, v9, a0, v0.t
+; CHECK-NEXT: ret
+entry:
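+ ; The slide amount (%2) is passed in a0, so the AVL (%4) lands in a1 and the
+ ; expected vsetvli reads it from there.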
+ %a = call <vscale x 1 x i8> @llvm.riscv.vslideup.mask.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ iXLen %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 0)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vslide1up.mask.nxv1i8.i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ i8,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x i8> @intrinsic_vslide1up_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv1i8_nxv1i8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu
+; CHECK-NEXT: vslide1up.vx v8, v9, a0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vslide1up.mask.nxv1i8.i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ i8 %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 0)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfslide1up.mask.nxv1f16.f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ half,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x half> @intrinsic_vfslide1up_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv1f16_nxv1f16_f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT: vfslide1up.vf v8, v9, fa0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfslide1up.mask.nxv1f16.f16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half> %1,
+ half %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 0)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vrgather.vv.mask.nxv1i8.iXLen(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x i8> @intrinsic_vrgather_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1i8_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT: vrgather.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.vv.mask.nxv1i8.iXLen(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 0)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen)
+define <vscale x 1 x i8> @intrinsic_vrgatherei16_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT: vrgatherei16.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 0)
+
+ ret <vscale x 1 x i8> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vadd-policy.ll b/llvm/test/CodeGen/RISCV/rvv/vadd-policy.ll
deleted file mode 100644
index 4436a39dac78d..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vadd-policy.ll
+++ /dev/null
@@ -1,67 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
-; RUN: --riscv-no-aliases < %s | FileCheck %s
-
-declare <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- i64);
-
-define <vscale x 8 x i8> @intrinsic_vadd_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vadd_vv_nxv8i8_nxv8i8_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT: vadd.vv v8, v8, v9
-; CHECK-NEXT: jalr zero, 0(ra)
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8(
- <vscale x 8 x i8> undef,
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8> %1,
- i64 %2)
-
- ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.nxv8i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- <vscale x 8 x i1>,
- i64, i64);
-
-define <vscale x 8 x i8> @intrinsic_vadd_mask_tu(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vadd_mask_tu:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu
-; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT: jalr zero, 0(ra)
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.nxv8i8(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8> %1,
- <vscale x 8 x i8> %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 0)
-
- ret <vscale x 8 x i8> %a
-}
-
-define <vscale x 8 x i8> @intrinsic_vadd_mask_ta(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vadd_mask_ta:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT: jalr zero, 0(ra)
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.nxv8i8(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8> %1,
- <vscale x 8 x i8> %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 8 x i8> %a
-}
-