[llvm] [RISCV] Select mask operands as virtual registers and eliminate vmv0 (PR #125026)
via llvm-commits
llvm-commits at lists.llvm.org
Wed Jan 29 20:32:14 PST 2025
llvmbot wrote:
<!--LLVM PR SUMMARY COMMENT-->
@llvm/pr-subscribers-llvm-globalisel
Author: Luke Lau (lukel97)
<details>
<summary>Changes</summary>
This is another attempt at #<!-- -->88496 to keep mask operands in SSA after instruction selection.
Previously we selected the mask operands into vmv0, a singleton register class with exactly one register, V0.
But the register allocator doesn't really support singleton register classes and we ran into errors like "ran out of registers during register allocation in function".
This patch avoids the problem by introducing a pass just before register allocation that converts any use of vmv0 to a copy to $v0, i.e. what isel currently does today.
That way the register allocator doesn't need to deal with the singleton register class, but we still get the benefits of having the mask registers in SSA throughout the backend:
- This allows RISCVVLOptimizer to reduce the VLs of instructions that define mask registers
- It enables CSE and code sinking in more places
- It removes the need to peek through mask copies in RISCVISelDAGToDAG and keep track of V0 defs in RISCVVectorPeephole
As a follow up, we can move the elimination pass to after phi elimination and outside of SSA, which would unblock the pre-RA scheduler around masked pseudos. This might also help with the issue that RISCVVectorMaskDAGMutation tries to solve.
---
Patch is 354.26 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/125026.diff
52 Files Affected:
- (modified) llvm/lib/Target/RISCV/CMakeLists.txt (+1)
- (modified) llvm/lib/Target/RISCV/RISCV.h (+3)
- (modified) llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp (+15-92)
- (modified) llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td (+46-46)
- (modified) llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td (+18-18)
- (modified) llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td (+210-210)
- (modified) llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td (+35-35)
- (modified) llvm/lib/Target/RISCV/RISCVTargetMachine.cpp (+3)
- (added) llvm/lib/Target/RISCV/RISCVVMV0Elimination.cpp (+154)
- (modified) llvm/lib/Target/RISCV/RISCVVectorPeephole.cpp (+25-40)
- (modified) llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/select.mir (+44-66)
- (modified) llvm/test/CodeGen/RISCV/O0-pipeline.ll (+1)
- (modified) llvm/test/CodeGen/RISCV/O3-pipeline.ll (+1)
- (modified) llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll (+8-12)
- (modified) llvm/test/CodeGen/RISCV/rvv/commutable.ll (+23-46)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll (+8-24)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-nearbyint-vp.ll (+43-33)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-trunc-vp.ll (+641-312)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpload.ll (+17-17)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect.ll (+16-8)
- (modified) llvm/test/CodeGen/RISCV/rvv/floor-vp.ll (+8-12)
- (modified) llvm/test/CodeGen/RISCV/rvv/fmaximum-vp.ll (+3-3)
- (modified) llvm/test/CodeGen/RISCV/rvv/fminimum-vp.ll (+3-3)
- (modified) llvm/test/CodeGen/RISCV/rvv/fnearbyint-sdnode.ll (+20-50)
- (modified) llvm/test/CodeGen/RISCV/rvv/fshr-fshl-vp.ll (+23-23)
- (modified) llvm/test/CodeGen/RISCV/rvv/implicit-def-copy.ll (+2-2)
- (modified) llvm/test/CodeGen/RISCV/rvv/mask-reg-alloc.mir (+3-3)
- (modified) llvm/test/CodeGen/RISCV/rvv/nearbyint-vp.ll (+42-16)
- (modified) llvm/test/CodeGen/RISCV/rvv/pass-fast-math-flags-sdnode.ll (+2-2)
- (modified) llvm/test/CodeGen/RISCV/rvv/round-vp.ll (+8-12)
- (modified) llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll (+8-12)
- (modified) llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll (+8-12)
- (modified) llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-to-vmv.mir (+29-51)
- (modified) llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops-mir.ll (+4-4)
- (modified) llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll (+52-100)
- (modified) llvm/test/CodeGen/RISCV/rvv/strided-vpload-vpstore-output.ll (+4-4)
- (modified) llvm/test/CodeGen/RISCV/rvv/strided-vpload.ll (+30-30)
- (modified) llvm/test/CodeGen/RISCV/rvv/vector-extract-last-active.ll (+52-44)
- (modified) llvm/test/CodeGen/RISCV/rvv/vector-reassociations.ll (+2-2)
- (modified) llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll (+8-8)
- (modified) llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll (+8-8)
- (modified) llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll (+84-94)
- (modified) llvm/test/CodeGen/RISCV/rvv/vfmax-vp.ll (+8-8)
- (modified) llvm/test/CodeGen/RISCV/rvv/vfmin-vp.ll (+8-8)
- (modified) llvm/test/CodeGen/RISCV/rvv/vfmul-vp.ll (+4-4)
- (modified) llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll (+52-57)
- (modified) llvm/test/CodeGen/RISCV/rvv/vfsub-vp.ll (+8-8)
- (modified) llvm/test/CodeGen/RISCV/rvv/vleff-vlseg2ff-output.ll (+4-4)
- (modified) llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll (+2-2)
- (modified) llvm/test/CodeGen/RISCV/rvv/vpload.ll (+9-9)
- (modified) llvm/test/CodeGen/RISCV/rvv/vreductions-fp-vp.ll (+36-32)
- (modified) llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll (+47-36)
``````````diff
diff --git a/llvm/lib/Target/RISCV/CMakeLists.txt b/llvm/lib/Target/RISCV/CMakeLists.txt
index 98d3615ebab58dd..9b23a5ab521c8d0 100644
--- a/llvm/lib/Target/RISCV/CMakeLists.txt
+++ b/llvm/lib/Target/RISCV/CMakeLists.txt
@@ -63,6 +63,7 @@ add_llvm_target(RISCVCodeGen
RISCVVectorMaskDAGMutation.cpp
RISCVVectorPeephole.cpp
RISCVVLOptimizer.cpp
+ RISCVVMV0Elimination.cpp
RISCVZacasABIFix.cpp
GISel/RISCVCallLowering.cpp
GISel/RISCVInstructionSelector.cpp
diff --git a/llvm/lib/Target/RISCV/RISCV.h b/llvm/lib/Target/RISCV/RISCV.h
index b1aee98739e8521..851eea135285246 100644
--- a/llvm/lib/Target/RISCV/RISCV.h
+++ b/llvm/lib/Target/RISCV/RISCV.h
@@ -107,6 +107,9 @@ void initializeRISCVPreLegalizerCombinerPass(PassRegistry &);
FunctionPass *createRISCVVLOptimizerPass();
void initializeRISCVVLOptimizerPass(PassRegistry &);
+
+FunctionPass *createRISCVVMV0EliminationPass();
+void initializeRISCVVMV0EliminationPass(PassRegistry &);
} // namespace llvm
#endif
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
index 9855028ead9e208..c79539a7e4d20bc 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -241,7 +241,6 @@ void RISCVDAGToDAGISel::addVectorLoadStoreOperands(
bool IsMasked, bool IsStridedOrIndexed, SmallVectorImpl<SDValue> &Operands,
bool IsLoad, MVT *IndexVT) {
SDValue Chain = Node->getOperand(0);
- SDValue Glue;
Operands.push_back(Node->getOperand(CurOp++)); // Base pointer.
@@ -252,11 +251,8 @@ void RISCVDAGToDAGISel::addVectorLoadStoreOperands(
}
if (IsMasked) {
- // Mask needs to be copied to V0.
SDValue Mask = Node->getOperand(CurOp++);
- Chain = CurDAG->getCopyToReg(Chain, DL, RISCV::V0, Mask, SDValue());
- Glue = Chain.getValue(1);
- Operands.push_back(CurDAG->getRegister(RISCV::V0, Mask.getValueType()));
+ Operands.push_back(Mask);
}
SDValue VL;
selectVLOp(Node->getOperand(CurOp++), VL);
@@ -278,8 +274,6 @@ void RISCVDAGToDAGISel::addVectorLoadStoreOperands(
}
Operands.push_back(Chain); // Chain.
- if (Glue)
- Operands.push_back(Glue);
}
void RISCVDAGToDAGISel::selectVLSEG(SDNode *Node, unsigned NF, bool IsMasked,
@@ -1831,19 +1825,13 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
return;
}
- // Mask needs to be copied to V0.
- SDValue Chain = CurDAG->getCopyToReg(CurDAG->getEntryNode(), DL,
- RISCV::V0, Mask, SDValue());
- SDValue Glue = Chain.getValue(1);
- SDValue V0 = CurDAG->getRegister(RISCV::V0, VT);
-
if (IsCmpConstant) {
SDValue Imm =
selectImm(CurDAG, SDLoc(Src2), XLenVT, CVal - 1, *Subtarget);
ReplaceNode(Node, CurDAG->getMachineNode(
VMSGTMaskOpcode, DL, VT,
- {MaskedOff, Src1, Imm, V0, VL, SEW, Glue}));
+ {MaskedOff, Src1, Imm, Mask, VL, SEW}));
return;
}
@@ -1854,7 +1842,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
// the agnostic result can be either undisturbed or all 1.
SDValue Cmp = SDValue(
CurDAG->getMachineNode(VMSLTMaskOpcode, DL, VT,
- {MaskedOff, Src1, Src2, V0, VL, SEW, Glue}),
+ {MaskedOff, Src1, Src2, Mask, VL, SEW}),
0);
// vmxor.mm vd, vd, v0 is used to update active value.
ReplaceNode(Node, CurDAG->getMachineNode(VMXOROpcode, DL, VT,
@@ -3274,12 +3262,10 @@ static bool vectorPseudoHasAllNBitUsers(SDNode *User, unsigned UserOpNo,
return false;
assert(RISCVII::hasVLOp(TSFlags));
- bool HasGlueOp = User->getGluedNode() != nullptr;
- unsigned ChainOpIdx = User->getNumOperands() - HasGlueOp - 1;
+ unsigned ChainOpIdx = User->getNumOperands() - 1;
bool HasChainOp = User->getOperand(ChainOpIdx).getValueType() == MVT::Other;
bool HasVecPolicyOp = RISCVII::hasVecPolicyOp(TSFlags);
- unsigned VLIdx =
- User->getNumOperands() - HasVecPolicyOp - HasChainOp - HasGlueOp - 2;
+ unsigned VLIdx = User->getNumOperands() - HasVecPolicyOp - HasChainOp - 2;
const unsigned Log2SEW = User->getConstantOperandVal(VLIdx + 1);
if (UserOpNo == VLIdx)
@@ -3739,43 +3725,7 @@ bool RISCVDAGToDAGISel::doPeepholeSExtW(SDNode *N) {
return false;
}
-// After ISel, a vector pseudo's mask will be copied to V0 via a CopyToReg
-// that's glued to the pseudo. This tries to look up the value that was copied
-// to V0.
-static SDValue getMaskSetter(SDValue MaskOp, SDValue GlueOp) {
- // Check that we're using V0 as a mask register.
- if (!isa<RegisterSDNode>(MaskOp) ||
- cast<RegisterSDNode>(MaskOp)->getReg() != RISCV::V0)
- return SDValue();
-
- // The glued user defines V0.
- const auto *Glued = GlueOp.getNode();
-
- if (!Glued || Glued->getOpcode() != ISD::CopyToReg)
- return SDValue();
-
- // Check that we're defining V0 as a mask register.
- if (!isa<RegisterSDNode>(Glued->getOperand(1)) ||
- cast<RegisterSDNode>(Glued->getOperand(1))->getReg() != RISCV::V0)
- return SDValue();
-
- SDValue MaskSetter = Glued->getOperand(2);
-
- // Sometimes the VMSET is wrapped in a COPY_TO_REGCLASS, e.g. if the mask came
- // from an extract_subvector or insert_subvector.
- if (MaskSetter->isMachineOpcode() &&
- MaskSetter->getMachineOpcode() == RISCV::COPY_TO_REGCLASS)
- MaskSetter = MaskSetter->getOperand(0);
-
- return MaskSetter;
-}
-
-static bool usesAllOnesMask(SDValue MaskOp, SDValue GlueOp) {
- // Check the instruction defining V0; it needs to be a VMSET pseudo.
- SDValue MaskSetter = getMaskSetter(MaskOp, GlueOp);
- if (!MaskSetter)
- return false;
-
+static bool usesAllOnesMask(SDValue MaskOp) {
const auto IsVMSet = [](unsigned Opc) {
return Opc == RISCV::PseudoVMSET_M_B1 || Opc == RISCV::PseudoVMSET_M_B16 ||
Opc == RISCV::PseudoVMSET_M_B2 || Opc == RISCV::PseudoVMSET_M_B32 ||
@@ -3786,14 +3736,7 @@ static bool usesAllOnesMask(SDValue MaskOp, SDValue GlueOp) {
// TODO: Check that the VMSET is the expected bitwidth? The pseudo has
// undefined behaviour if it's the wrong bitwidth, so we could choose to
// assume that it's all-ones? Same applies to its VL.
- return MaskSetter->isMachineOpcode() &&
- IsVMSet(MaskSetter.getMachineOpcode());
-}
-
-// Return true if we can make sure mask of N is all-ones mask.
-static bool usesAllOnesMask(SDNode *N, unsigned MaskOpIdx) {
- return usesAllOnesMask(N->getOperand(MaskOpIdx),
- N->getOperand(N->getNumOperands() - 1));
+ return MaskOp->isMachineOpcode() && IsVMSet(MaskOp.getMachineOpcode());
}
static bool isImplicitDef(SDValue V) {
@@ -3809,9 +3752,7 @@ static bool isImplicitDef(SDValue V) {
}
// Optimize masked RVV pseudo instructions with a known all-ones mask to their
-// corresponding "unmasked" pseudo versions. The mask we're interested in will
-// take the form of a V0 physical register operand, with a glued
-// register-setting instruction.
+// corresponding "unmasked" pseudo versions.
bool RISCVDAGToDAGISel::doPeepholeMaskedRVV(MachineSDNode *N) {
const RISCV::RISCVMaskedPseudoInfo *I =
RISCV::getMaskedPseudoInfo(N->getMachineOpcode());
@@ -3819,7 +3760,7 @@ bool RISCVDAGToDAGISel::doPeepholeMaskedRVV(MachineSDNode *N) {
return false;
unsigned MaskOpIdx = I->MaskOpIdx;
- if (!usesAllOnesMask(N, MaskOpIdx))
+ if (!usesAllOnesMask(N->getOperand(MaskOpIdx)))
return false;
// There are two classes of pseudos in the table - compares and
@@ -3843,18 +3784,13 @@ bool RISCVDAGToDAGISel::doPeepholeMaskedRVV(MachineSDNode *N) {
// Skip the passthru operand at index 0 if the unmasked don't have one.
bool ShouldSkip = !HasPassthru && MaskedHasPassthru;
for (unsigned I = ShouldSkip, E = N->getNumOperands(); I != E; I++) {
- // Skip the mask, and the Glue.
+ // Skip the mask
SDValue Op = N->getOperand(I);
- if (I == MaskOpIdx || Op.getValueType() == MVT::Glue)
+ if (I == MaskOpIdx)
continue;
Ops.push_back(Op);
}
- // Transitively apply any node glued to our new node.
- const auto *Glued = N->getGluedNode();
- if (auto *TGlued = Glued->getGluedNode())
- Ops.push_back(SDValue(TGlued, TGlued->getNumValues() - 1));
-
MachineSDNode *Result =
CurDAG->getMachineNode(Opc, SDLoc(N), N->getVTList(), Ops);
@@ -3890,17 +3826,13 @@ static bool IsVMerge(SDNode *N) {
// The resulting policy is the effective policy the vmerge would have had,
// i.e. whether or not it's passthru operand was implicit-def.
bool RISCVDAGToDAGISel::performCombineVMergeAndVOps(SDNode *N) {
- SDValue Passthru, False, True, VL, Mask, Glue;
+ SDValue Passthru, False, True, VL, Mask;
assert(IsVMerge(N));
Passthru = N->getOperand(0);
False = N->getOperand(1);
True = N->getOperand(2);
Mask = N->getOperand(3);
VL = N->getOperand(4);
- // We always have a glue node for the mask at v0.
- Glue = N->getOperand(N->getNumOperands() - 1);
- assert(cast<RegisterSDNode>(Mask)->getReg() == RISCV::V0);
- assert(Glue.getValueType() == MVT::Glue);
// If the EEW of True is different from vmerge's SEW, then we can't fold.
if (True.getSimpleValueType() != N->getSimpleValueType(0))
@@ -3943,12 +3875,7 @@ bool RISCVDAGToDAGISel::performCombineVMergeAndVOps(SDNode *N) {
if (TII->get(TrueOpc).hasUnmodeledSideEffects())
return false;
- // The last operand of a masked instruction may be glued.
- bool HasGlueOp = True->getGluedNode() != nullptr;
-
- // The chain operand may exist either before the glued operands or in the last
- // position.
- unsigned TrueChainOpIdx = True.getNumOperands() - HasGlueOp - 1;
+ unsigned TrueChainOpIdx = True.getNumOperands() - 1;
bool HasChainOp =
True.getOperand(TrueChainOpIdx).getValueType() == MVT::Other;
@@ -3960,7 +3887,6 @@ bool RISCVDAGToDAGISel::performCombineVMergeAndVOps(SDNode *N) {
LoopWorklist.push_back(False.getNode());
LoopWorklist.push_back(Mask.getNode());
LoopWorklist.push_back(VL.getNode());
- LoopWorklist.push_back(Glue.getNode());
if (SDNode::hasPredecessorHelper(True.getNode(), Visited, LoopWorklist))
return false;
}
@@ -3968,7 +3894,7 @@ bool RISCVDAGToDAGISel::performCombineVMergeAndVOps(SDNode *N) {
// The vector policy operand may be present for masked intrinsics
bool HasVecPolicyOp = RISCVII::hasVecPolicyOp(TrueTSFlags);
unsigned TrueVLIndex =
- True.getNumOperands() - HasVecPolicyOp - HasChainOp - HasGlueOp - 2;
+ True.getNumOperands() - HasVecPolicyOp - HasChainOp - 2;
SDValue TrueVL = True.getOperand(TrueVLIndex);
SDValue SEW = True.getOperand(TrueVLIndex + 1);
@@ -4000,7 +3926,7 @@ bool RISCVDAGToDAGISel::performCombineVMergeAndVOps(SDNode *N) {
if (RISCVII::elementsDependOnVL(TrueBaseMCID.TSFlags) && (TrueVL != VL))
return false;
if (RISCVII::elementsDependOnMask(TrueBaseMCID.TSFlags) &&
- (Mask && !usesAllOnesMask(Mask, Glue)))
+ (Mask && !usesAllOnesMask(Mask)))
return false;
// Make sure it doesn't raise any observable fp exceptions, since changing the
@@ -4057,9 +3983,6 @@ bool RISCVDAGToDAGISel::performCombineVMergeAndVOps(SDNode *N) {
if (HasChainOp)
Ops.push_back(True.getOperand(TrueChainOpIdx));
- // Add the glue for the CopyToReg of mask->v0.
- Ops.push_back(Glue);
-
MachineSDNode *Result =
CurDAG->getMachineNode(MaskedOpc, DL, True->getVTList(), Ops);
Result->setFlags(True->getFlags());
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index 268bfe70673a2ac..46cf27838d1ce23 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -3945,7 +3945,7 @@ class VPatUnaryMask<string intrinsic_name,
Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
(result_type result_reg_class:$passthru),
(op2_type op2_reg_class:$rs2),
- (mask_type V0),
+ (mask_type VMV0:$vm),
VLOpFrag, (XLenVT timm:$policy))),
(!cast<Instruction>(
!if(isSEWAware,
@@ -3953,7 +3953,7 @@ class VPatUnaryMask<string intrinsic_name,
inst#"_"#kind#"_"#vlmul.MX#"_MASK"))
(result_type result_reg_class:$passthru),
(op2_type op2_reg_class:$rs2),
- (mask_type V0), GPR:$vl, log2sew, (XLenVT timm:$policy))>;
+ (mask_type VMV0:$vm), GPR:$vl, log2sew, (XLenVT timm:$policy))>;
class VPatUnaryMaskRoundingMode<string intrinsic_name,
string inst,
@@ -3969,7 +3969,7 @@ class VPatUnaryMaskRoundingMode<string intrinsic_name,
Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
(result_type result_reg_class:$passthru),
(op2_type op2_reg_class:$rs2),
- (mask_type V0),
+ (mask_type VMV0:$vm),
(XLenVT timm:$round),
VLOpFrag, (XLenVT timm:$policy))),
(!cast<Instruction>(
@@ -3978,7 +3978,7 @@ class VPatUnaryMaskRoundingMode<string intrinsic_name,
inst#"_"#kind#"_"#vlmul.MX#"_MASK"))
(result_type result_reg_class:$passthru),
(op2_type op2_reg_class:$rs2),
- (mask_type V0),
+ (mask_type VMV0:$vm),
(XLenVT timm:$round),
GPR:$vl, log2sew, (XLenVT timm:$policy))>;
@@ -3996,7 +3996,7 @@ class VPatUnaryMaskRTZ<string intrinsic_name,
Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
(result_type result_reg_class:$passthru),
(op2_type op2_reg_class:$rs2),
- (mask_type V0),
+ (mask_type VMV0:$vm),
(XLenVT 0b001),
VLOpFrag, (XLenVT timm:$policy))),
(!cast<Instruction>(
@@ -4005,7 +4005,7 @@ class VPatUnaryMaskRTZ<string intrinsic_name,
inst#"_"#kind#"_"#vlmul.MX#"_MASK"))
(result_type result_reg_class:$passthru),
(op2_type op2_reg_class:$rs2),
- (mask_type V0),
+ (mask_type VMV0:$vm),
GPR:$vl, log2sew, (XLenVT timm:$policy))>;
class VPatMaskUnaryNoMask<string intrinsic_name,
@@ -4024,12 +4024,12 @@ class VPatMaskUnaryMask<string intrinsic_name,
Pat<(mti.Mask (!cast<Intrinsic>(intrinsic_name#"_mask")
(mti.Mask VR:$passthru),
(mti.Mask VR:$rs2),
- (mti.Mask V0),
+ (mti.Mask VMV0:$vm),
VLOpFrag)),
(!cast<Instruction>(inst#"_M_"#mti.BX#"_MASK")
(mti.Mask VR:$passthru),
(mti.Mask VR:$rs2),
- (mti.Mask V0), GPR:$vl, mti.Log2SEW, TU_MU)>;
+ (mti.Mask VMV0:$vm), GPR:$vl, mti.Log2SEW, TU_MU)>;
class VPatUnaryAnyMask<string intrinsic,
string inst,
@@ -4144,13 +4144,13 @@ class VPatBinaryMask<string intrinsic_name,
(result_type result_reg_class:$passthru),
(op1_type op1_reg_class:$rs1),
(op2_type op2_kind:$rs2),
- (mask_type V0),
+ (mask_type VMV0:$vm),
VLOpFrag)),
(!cast<Instruction>(inst#"_MASK")
(result_type result_reg_class:$passthru),
(op1_type op1_reg_class:$rs1),
(op2_type op2_kind:$rs2),
- (mask_type V0), GPR:$vl, sew)>;
+ (mask_type VMV0:$vm), GPR:$vl, sew)>;
class VPatBinaryMaskPolicy<string intrinsic_name,
string inst,
@@ -4166,13 +4166,13 @@ class VPatBinaryMaskPolicy<string intrinsic_name,
(result_type result_reg_class:$passthru),
(op1_type op1_reg_class:$rs1),
(op2_type op2_kind:$rs2),
- (mask_type V0),
+ (mask_type VMV0:$vm),
VLOpFrag, (XLenVT timm:$policy))),
(!cast<Instruction>(inst#"_MASK")
(result_type result_reg_class:$passthru),
(op1_type op1_reg_class:$rs1),
(op2_type op2_kind:$rs2),
- (mask_type V0), GPR:$vl, sew, (XLenVT timm:$policy))>;
+ (mask_type VMV0:$vm), GPR:$vl, sew, (XLenVT timm:$policy))>;
class VPatBinaryMaskPolicyRoundingMode<string intrinsic_name,
string inst,
@@ -4188,14 +4188,14 @@ class VPatBinaryMaskPolicyRoundingMode<string intrinsic_name,
(result_type result_reg_class:$passthru),
(op1_type op1_reg_class:$rs1),
(op2_type op2_kind:$rs2),
- (mask_type V0),
+ (mask_type VMV0:$vm),
(XLenVT timm:$round),
VLOpFrag, (XLenVT timm:$policy))),
(!cast<Instruction>(inst#"_MASK")
(result_type result_reg_class:$passthru),
(op1_type op1_reg_class:$rs1),
(op2_type op2_kind:$rs2),
- (mask_type V0),
+ (mask_type VMV0:$vm),
(XLenVT timm:$round),
GPR:$vl, sew, (XLenVT timm:$policy))>;
@@ -4214,13 +4214,13 @@ class VPatBinaryMaskSwapped<string intrinsic_name,
(result_type result_reg_class:$passthru),
(op2_type op2_kind:$rs2),
(op1_type op1_reg_class:$rs1),
- (mask_type V0),
+ (mask_type VMV0:$vm),
VLOpFrag)),
(!cast<Instruction>(inst#"_MASK")
(result_type result_reg_class:$passthru),
(op1_type op1_reg_class:$rs1),
(op2_type op2_kind:$rs2),
- (mask_type V0), GPR:$vl, sew)>;
+ (mask_type VMV0:$vm), GPR:$vl, sew)>;
class VPatTiedBinaryNoMask<string intrinsic_name,
string inst,
@@ -4306,12 +4306,12 @@ class VPatTiedBinaryMask<string intrinsic_name,
(result_type result_reg_class:$passthru),
(result_type result_reg_class:$passthru),
(op2_type op2_kind:$rs2),
- (mask_type V0),
+ (mask_type VMV0:$vm),
VLOpFrag, (XLenVT timm:$policy))),
(!cast<Instruction>(inst#"_MASK_TIED")
(result_type result_reg_class:$passthru),
(op2_type op2_kind:$rs2),
- (mask_type V0), GPR:$vl, sew, (XLenVT timm:$policy))>;
+ (mask_type VMV0:$vm), GPR:$vl, sew, (XLenVT timm:$policy))>;
class VPatTiedBinaryMaskRoundingMode<string intrinsic_name,
string inst,
@@ -4325,13 +4325,13 @@ class VPatTiedBinaryMaskRoundingMode<string intrinsic_name,
(result_type result_reg_class:$passthru),
(result_type result_reg_class:$passthru),
(op2_type op2_kind:$rs2),
- (mask_type V0),
+ (mask_type VMV0:$vm),
(XLenVT timm:$round),
VLOpFrag, (XLenVT timm:$policy))),
(!cast<Instruction>(inst#"_MASK_TIED")
(result_type result_reg_class:$passthru),
(op2_type op2_kind:$rs2),
- (mask_type V0),
+ (mask_type VMV0:$vm),
(XLenVT timm:$round),
GPR:$vl, sew, (XLenVT timm:$policy))>;
@@ -4447,13 +4447,13 @@ class VPatTernaryMaskPolicy<string intrinsic,
(result_type result_reg_class:$rs3),
(op1_type op1_reg_class:$rs1),
(op2_type op2_kind:$rs2),
- (mask_type V0),
+ (mask_type VMV0:$vm),
VLOpFrag, (XLenVT timm:$policy))),
(!cast<Instruction>(inst#...
[truncated]
``````````
</details>
https://github.com/llvm/llvm-project/pull/125026
More information about the llvm-commits
mailing list