[llvm] [RISCV] Don't use V0 directly in patterns (PR #88496)
Luke Lau via llvm-commits
llvm-commits at lists.llvm.org
Sun Apr 14 19:27:42 PDT 2024
lukel97 wrote:
> @lukel97 didn't you try this previously?
Yeah, the issue I ran into was due to MachineCSE constraining operands to VMV0, which sometimes meant we had an earlyclobber constraint on an instruction with two VMV0s, e.g.:
early-clobber %res:vr = PseudoVMSEQ_VV_M2_MASK %mask:vmv0(tied-def 0), ..., %mask:vmv0, ...
So regalloc would fail because it can't choose two separate registers from VMV0: https://github.com/llvm/llvm-project/pull/75347
I discussed this offline with @MatzeB and @arsenm, and I think the consensus was that singleton register classes weren't really a thing on other targets, and that the conventional thing to do would be to use a virtual-reg-to-physical-reg copy.
But looking at the premerge CI for this PR, the crash doesn't seem to be appearing? Specifically, I think this affected the compare instructions, i.e. vmseq. Did something change with MachineCSE in the meantime? Digging out my old patch, it seems to be doing the same thing:
<details><summary>Details</summary>
<p>
```diff
diff --git a/llvm/lib/CodeGen/MachineCSE.cpp b/llvm/lib/CodeGen/MachineCSE.cpp
index 89c4562e8d38..8a3893411e0b 100644
--- a/llvm/lib/CodeGen/MachineCSE.cpp
+++ b/llvm/lib/CodeGen/MachineCSE.cpp
@@ -192,6 +192,21 @@ bool MachineCSE::PerformTrivialCopyPropagation(MachineInstr *MI,
continue;
if (DefMI->getOperand(0).getSubReg())
continue;
+
+
+ bool Foo = false;
+ for (MachineOperand &DefMO : MI->defs()) {
+ if (DefMO.isEarlyClobber() && DefMO.isTied()) {
+ MachineOperand &TiedMO =
+ MI->getOperand(MI->findTiedOperandIdx(DefMO.getOperandNo()));
+ if (TiedMO.getReg() == SrcReg) {
+ Foo = true;
+ break;
+ }
+ }
+ }
+ if (Foo)
+ continue;
// FIXME: We should trivially coalesce subregister copies to expose CSE
// opportunities on instructions with truncated operands (see
// cse-add-with-overflow.ll). This can be done here as follows:
@@ -208,6 +223,7 @@ bool MachineCSE::PerformTrivialCopyPropagation(MachineInstr *MI,
continue;
if (!MRI->constrainRegAttrs(SrcReg, Reg))
continue;
+
LLVM_DEBUG(dbgs() << "Coalescing: " << *DefMI);
LLVM_DEBUG(dbgs() << "*** to: " << *MI);
diff --git a/llvm/lib/Target/RISCV/RISCVFoldMasks.cpp b/llvm/lib/Target/RISCV/RISCVFoldMasks.cpp
index 43e8d1096021..76352bfb04f2 100644
--- a/llvm/lib/Target/RISCV/RISCVFoldMasks.cpp
+++ b/llvm/lib/Target/RISCV/RISCVFoldMasks.cpp
@@ -66,11 +66,11 @@ public:
StringRef getPassName() const override { return "RISC-V Fold Masks"; }
private:
- bool convertToUnmasked(MachineInstr &MI, MachineInstr *MaskDef);
- bool foldVMergeIntoOps(MachineInstr &MI, MachineInstr *MaskDef);
- bool convertVMergeToVMv(MachineInstr &MI, MachineInstr *MaskDef);
+ bool convertToUnmasked(MachineInstr &MI);
+ bool foldVMergeIntoOps(MachineInstr &MI);
+ bool convertVMergeToVMv(MachineInstr &MI);
- bool isAllOnesMask(MachineInstr *MaskDef);
+ bool isAllOnesMask(const MachineOperand &MaskOp);
};
} // namespace
@@ -79,14 +79,15 @@ char RISCVFoldMasks::ID = 0;
INITIALIZE_PASS(RISCVFoldMasks, DEBUG_TYPE, "RISC-V Fold Masks", false, false)
-bool RISCVFoldMasks::isAllOnesMask(MachineInstr *MaskDef) {
- if (!MaskDef)
- return false;
- assert(MaskDef->isCopy() && MaskDef->getOperand(0).getReg() == RISCV::V0);
- Register SrcReg = TRI->lookThruCopyLike(MaskDef->getOperand(1).getReg(), MRI);
+bool RISCVFoldMasks::isAllOnesMask(const MachineOperand &MaskOp) {
+ assert(MaskOp.isReg() && MaskOp.getReg().isVirtual());
+ // if (!MaskDef)
+ // return false;
+ // assert(MaskDef->isCopy() && MaskDef->getOperand(0).getReg() == RISCV::V0);
+ Register SrcReg = TRI->lookThruCopyLike(MaskOp.getReg(), MRI);
if (!SrcReg.isVirtual())
return false;
- MaskDef = MRI->getVRegDef(SrcReg);
+ MachineInstr *MaskDef = MRI->getVRegDef(SrcReg);
if (!MaskDef)
return false;
@@ -142,8 +143,7 @@ static unsigned getVMSetForLMul(RISCVII::VLMUL LMUL) {
// not the pseudo name. That is, a TA VMERGE_VVM can be either the _TU pseudo
// form with an IMPLICIT_DEF passthrough operand or the unsuffixed (TA) pseudo
// form.
-bool RISCVFoldMasks::foldVMergeIntoOps(MachineInstr &MI,
- MachineInstr *MaskDef) {
+bool RISCVFoldMasks::foldVMergeIntoOps(MachineInstr &MI) {
MachineOperand *True;
MachineOperand *Merge;
MachineOperand *False;
@@ -181,7 +181,7 @@ bool RISCVFoldMasks::foldVMergeIntoOps(MachineInstr &MI,
bool HasTiedDest = RISCVII::isFirstDefTiedToFirstUse(TrueMCID);
const bool MIIsMasked =
- BaseOpc == RISCV::VMERGE_VVM && !isAllOnesMask(MaskDef);
+ BaseOpc == RISCV::VMERGE_VVM && !isAllOnesMask(MI.getOperand(4));
bool TrueIsMasked = false;
const RISCV::RISCVMaskedPseudoInfo *Info =
RISCV::lookupMaskedIntrinsicByUnmasked(TrueOpc);
@@ -299,23 +299,21 @@ bool RISCVFoldMasks::foldVMergeIntoOps(MachineInstr &MI,
// Set the merge to the false operand of the merge.
TrueMI.getOperand(1).setReg(False->getReg());
- bool NeedToMoveOldMask = TrueIsMasked;
// If we're converting it to a masked pseudo, reuse MI's mask.
if (!TrueIsMasked) {
- if (BaseOpc == RISCV::VMV_V_V) {
+ Register MaskReg;
+ if (BaseOpc == RISCV::VMERGE_VVM) {
+ MaskReg = MI.getOperand(4).getReg();
+ } else {
// If MI is a vmv.v.v, it won't have a mask operand. So insert an all-ones
// mask just before True.
unsigned VMSetOpc =
getVMSetForLMul(RISCVII::getLMul(MI.getDesc().TSFlags));
- Register Dest = MRI->createVirtualRegister(&RISCV::VRRegClass);
+ MaskReg = MRI->createVirtualRegister(&RISCV::VRRegClass);
BuildMI(*MI.getParent(), TrueMI, MI.getDebugLoc(), TII->get(VMSetOpc),
- Dest)
+ MaskReg)
.add(VL)
.add(TrueMI.getOperand(RISCVII::getSEWOpNum(TrueMCID)));
- BuildMI(*MI.getParent(), TrueMI, MI.getDebugLoc(), TII->get(RISCV::COPY),
- RISCV::V0)
- .addReg(Dest);
- NeedToMoveOldMask = true;
}
TrueMI.setDesc(MaskedMCID);
@@ -323,7 +321,7 @@ bool RISCVFoldMasks::foldVMergeIntoOps(MachineInstr &MI,
// TODO: Increment MaskOpIdx by number of explicit defs in tablegen?
unsigned MaskOpIdx = Info->MaskOpIdx + TrueMI.getNumExplicitDefs();
TrueMI.insert(&TrueMI.getOperand(MaskOpIdx),
- MachineOperand::CreateReg(RISCV::V0, false));
+ MachineOperand::CreateReg(MaskReg, false));
}
// Update the AVL.
@@ -357,23 +355,17 @@ bool RISCVFoldMasks::foldVMergeIntoOps(MachineInstr &MI,
MRI->replaceRegWith(MI.getOperand(0).getReg(), TrueMI.getOperand(0).getReg());
- // We need to move the old mask copy to after MI if:
- // - TrueMI is masked and we are using its mask instead
- // - We created a new all ones mask that clobbers V0
- if (NeedToMoveOldMask && MaskDef) {
- assert(MaskDef->getParent() == MI.getParent());
- MaskDef->removeFromParent();
- MI.getParent()->insertAfter(MI.getIterator(), MaskDef);
- }
-
MI.eraseFromParent();
+// TODO: Is this still needed?
+ // if (IsMasked)
+ // MaskDef->eraseFromParent();
return true;
}
// Transform (VMERGE_VVM_<LMUL> false, false, true, allones, vl, sew) to
// (VMV_V_V_<LMUL> false, true, vl, sew). It may decrease uses of VMSET.
-bool RISCVFoldMasks::convertVMergeToVMv(MachineInstr &MI, MachineInstr *V0Def) {
+bool RISCVFoldMasks::convertVMergeToVMv(MachineInstr &MI) {
#define CASE_VMERGE_TO_VMV(lmul) \
case RISCV::PseudoVMERGE_VVM_##lmul: \
NewOpc = RISCV::PseudoVMV_V_V_##lmul; \
@@ -398,8 +390,7 @@ bool RISCVFoldMasks::convertVMergeToVMv(MachineInstr &MI, MachineInstr *V0Def) {
TRI->lookThruCopyLike(FalseReg, MRI))
return false;
- assert(MI.getOperand(4).isReg() && MI.getOperand(4).getReg() == RISCV::V0);
- if (!isAllOnesMask(V0Def))
+ if (!isAllOnesMask(MI.getOperand(4)))
return false;
MI.setDesc(TII->get(NewOpc));
@@ -416,14 +407,13 @@ bool RISCVFoldMasks::convertVMergeToVMv(MachineInstr &MI, MachineInstr *V0Def) {
return true;
}
-bool RISCVFoldMasks::convertToUnmasked(MachineInstr &MI,
- MachineInstr *MaskDef) {
+bool RISCVFoldMasks::convertToUnmasked(MachineInstr &MI) {
const RISCV::RISCVMaskedPseudoInfo *I =
RISCV::getMaskedPseudoInfo(MI.getOpcode());
if (!I)
return false;
- if (!isAllOnesMask(MaskDef))
+ if (!isAllOnesMask(MI.getOperand(I->MaskOpIdx + MI.getNumExplicitDefs())))
return false;
// There are two classes of pseudos in the table - compares and
@@ -475,32 +465,17 @@ bool RISCVFoldMasks::runOnMachineFunction(MachineFunction &MF) {
bool Changed = false;
- // Masked pseudos coming out of isel will have their mask operand in the form:
- //
- // $v0:vr = COPY %mask:vr
- // %x:vr = Pseudo_MASK %a:vr, %b:br, $v0:vr
- //
- // Because $v0 isn't in SSA, keep track of it so we can check the mask operand
- // on each pseudo.
- MachineInstr *CurrentV0Def;
for (MachineBasicBlock &MBB : MF) {
- CurrentV0Def = nullptr;
for (MachineInstr &MI : make_early_inc_range(MBB)) {
// TODO: We can remove this if we handle TA merge in foldVMergeIntoOps.
- Changed |= convertToUnmasked(MI, CurrentV0Def);
+ Changed |= convertToUnmasked(MI);
- Changed |= foldVMergeIntoOps(MI, CurrentV0Def);
- if (MI.definesRegister(RISCV::V0, TRI))
- CurrentV0Def = &MI;
+ Changed |= foldVMergeIntoOps(MI);
}
- CurrentV0Def = nullptr;
for (MachineInstr &MI : MBB) {
- Changed |= convertToUnmasked(MI, CurrentV0Def);
- Changed |= convertVMergeToVMv(MI, CurrentV0Def);
-
- if (MI.definesRegister(RISCV::V0, TRI))
- CurrentV0Def = &MI;
+ Changed |= convertToUnmasked(MI);
+ Changed |= convertVMergeToVMv(MI);
}
}
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
index 2136f8aac7a9..8f85c93d899a 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -299,13 +299,8 @@ void RISCVDAGToDAGISel::addVectorLoadStoreOperands(
*IndexVT = Operands.back()->getSimpleValueType(0);
}
- if (IsMasked) {
- // Mask needs to be copied to V0.
- SDValue Mask = Node->getOperand(CurOp++);
- Chain = CurDAG->getCopyToReg(Chain, DL, RISCV::V0, Mask, SDValue());
- Glue = Chain.getValue(1);
- Operands.push_back(CurDAG->getRegister(RISCV::V0, Mask.getValueType()));
- }
+ if (IsMasked)
+ Operands.push_back(Node->getOperand(CurOp++));
SDValue VL;
selectVLOp(Node->getOperand(CurOp++), VL);
Operands.push_back(VL);
@@ -1571,21 +1566,16 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
return;
}
- // Mask needs to be copied to V0.
- SDValue Chain = CurDAG->getCopyToReg(CurDAG->getEntryNode(), DL,
- RISCV::V0, Mask, SDValue());
- SDValue Glue = Chain.getValue(1);
- SDValue V0 = CurDAG->getRegister(RISCV::V0, VT);
-
// Otherwise use
// vmslt{u}.vx vd, va, x, v0.t; vmxor.mm vd, vd, v0
// The result is mask undisturbed.
// We use the same instructions to emulate mask agnostic behavior, because
// the agnostic result can be either undisturbed or all 1.
- SDValue Cmp = SDValue(
- CurDAG->getMachineNode(VMSLTMaskOpcode, DL, VT,
- {MaskedOff, Src1, Src2, V0, VL, SEW, Glue}),
- 0);
+ SDValue Cmp =
+ SDValue(CurDAG->getMachineNode(VMSLTMaskOpcode, DL, VT,
+ {MaskedOff, Src1, Src2, Mask, VL, SEW,
+ CurDAG->getEntryNode()}),
+ 0);
// vmxor.mm vd, vd, v0 is used to update active value.
ReplaceNode(Node, CurDAG->getMachineNode(VMXOROpcode, DL, VT,
{Cmp, Mask, VL, MaskSEW}));
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index 127d3080491d..f75212ab8d39 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -4020,12 +4020,12 @@ class VPatUnaryMask<string intrinsic_name,
Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
(result_type result_reg_class:$merge),
(op2_type op2_reg_class:$rs2),
- (mask_type V0),
+ (mask_type VMV0:$vm),
VLOpFrag, (XLenVT timm:$policy))),
(!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX#"_MASK")
(result_type result_reg_class:$merge),
(op2_type op2_reg_class:$rs2),
- (mask_type V0), GPR:$vl, log2sew, (XLenVT timm:$policy))>;
+ (mask_type VMV0:$vm), GPR:$vl, log2sew, (XLenVT timm:$policy))>;
class VPatUnaryMaskRoundingMode<string intrinsic_name,
string inst,
@@ -4041,7 +4041,7 @@ class VPatUnaryMaskRoundingMode<string intrinsic_name,
Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
(result_type result_reg_class:$merge),
(op2_type op2_reg_class:$rs2),
- (mask_type V0),
+ (mask_type VMV0:$vm),
(XLenVT timm:$round),
VLOpFrag, (XLenVT timm:$policy))),
(!cast<Instruction>(
@@ -4050,7 +4050,7 @@ class VPatUnaryMaskRoundingMode<string intrinsic_name,
inst#"_"#kind#"_"#vlmul.MX#"_MASK"))
(result_type result_reg_class:$merge),
(op2_type op2_reg_class:$rs2),
- (mask_type V0),
+ (mask_type VMV0:$vm),
(XLenVT timm:$round),
GPR:$vl, log2sew, (XLenVT timm:$policy))>;
@@ -4072,12 +4072,12 @@ class VPatMaskUnaryMask<string intrinsic_name,
Pat<(mti.Mask (!cast<Intrinsic>(intrinsic_name#"_mask")
(mti.Mask VR:$merge),
(mti.Mask VR:$rs2),
- (mti.Mask V0),
+ (mti.Mask VMV0:$vm),
VLOpFrag)),
(!cast<Instruction>(inst#"_M_"#mti.BX#"_MASK")
(mti.Mask VR:$merge),
(mti.Mask VR:$rs2),
- (mti.Mask V0), GPR:$vl, mti.Log2SEW, TU_MU)>;
+ (mti.Mask VMV0:$vm), GPR:$vl, mti.Log2SEW, TU_MU)>;
class VPatUnaryAnyMask<string intrinsic,
string inst,
@@ -4213,13 +4213,13 @@ class VPatBinaryMask<string intrinsic_name,
(result_type result_reg_class:$merge),
(op1_type op1_reg_class:$rs1),
(op2_type op2_kind:$rs2),
- (mask_type V0),
+ (mask_type VMV0:$vm),
VLOpFrag)),
(!cast<Instruction>(inst#"_MASK")
(result_type result_reg_class:$merge),
(op1_type op1_reg_class:$rs1),
(op2_type op2_kind:$rs2),
- (mask_type V0), GPR:$vl, sew)>;
+ (mask_type VMV0:$vm), GPR:$vl, sew)>;
class VPatBinaryMaskTA<string intrinsic_name,
string inst,
@@ -4235,13 +4235,13 @@ class VPatBinaryMaskTA<string intrinsic_name,
(result_type result_reg_class:$merge),
(op1_type op1_reg_class:$rs1),
(op2_type op2_kind:$rs2),
- (mask_type V0),
+ (mask_type VMV0:$vm),
VLOpFrag, (XLenVT timm:$policy))),
(!cast<Instruction>(inst#"_MASK")
(result_type result_reg_class:$merge),
(op1_type op1_reg_class:$rs1),
(op2_type op2_kind:$rs2),
- (mask_type V0), GPR:$vl, sew, (XLenVT timm:$policy))>;
+ (mask_type VMV0:$vm), GPR:$vl, sew, (XLenVT timm:$policy))>;
class VPatBinaryMaskTARoundingMode<string intrinsic_name,
string inst,
@@ -4257,14 +4257,14 @@ class VPatBinaryMaskTARoundingMode<string intrinsic_name,
(result_type result_reg_class:$merge),
(op1_type op1_reg_class:$rs1),
(op2_type op2_kind:$rs2),
- (mask_type V0),
+ (mask_type VMV0:$vm),
(XLenVT timm:$round),
VLOpFrag, (XLenVT timm:$policy))),
(!cast<Instruction>(inst#"_MASK")
(result_type result_reg_class:$merge),
(op1_type op1_reg_class:$rs1),
(op2_type op2_kind:$rs2),
- (mask_type V0),
+ (mask_type VMV0:$vm),
(XLenVT timm:$round),
GPR:$vl, sew, (XLenVT timm:$policy))>;
@@ -4283,13 +4283,13 @@ class VPatBinaryMaskSwapped<string intrinsic_name,
(result_type result_reg_class:$merge),
(op2_type op2_kind:$rs2),
(op1_type op1_reg_class:$rs1),
- (mask_type V0),
+ (mask_type VMV0:$vm),
VLOpFrag)),
(!cast<Instruction>(inst#"_MASK")
(result_type result_reg_class:$merge),
(op1_type op1_reg_class:$rs1),
(op2_type op2_kind:$rs2),
- (mask_type V0), GPR:$vl, sew)>;
+ (mask_type VMV0:$vm), GPR:$vl, sew)>;
class VPatTiedBinaryNoMask<string intrinsic_name,
string inst,
@@ -4375,12 +4375,12 @@ class VPatTiedBinaryMask<string intrinsic_name,
(result_type result_reg_class:$merge),
(result_type result_reg_class:$merge),
(op2_type op2_kind:$rs2),
- (mask_type V0),
+ (mask_type VMV0:$vm),
VLOpFrag, (XLenVT timm:$policy))),
(!cast<Instruction>(inst#"_MASK_TIED")
(result_type result_reg_class:$merge),
(op2_type op2_kind:$rs2),
- (mask_type V0), GPR:$vl, sew, (XLenVT timm:$policy))>;
+ (mask_type VMV0:$vm), GPR:$vl, sew, (XLenVT timm:$policy))>;
class VPatTiedBinaryMaskRoundingMode<string intrinsic_name,
string inst,
@@ -4394,13 +4394,13 @@ class VPatTiedBinaryMaskRoundingMode<string intrinsic_name,
(result_type result_reg_class:$merge),
(result_type result_reg_class:$merge),
(op2_type op2_kind:$rs2),
- (mask_type V0),
+ (mask_type VMV0:$vm),
(XLenVT timm:$round),
VLOpFrag, (XLenVT timm:$policy))),
(!cast<Instruction>(inst#"_MASK_TIED")
(result_type result_reg_class:$merge),
(op2_type op2_kind:$rs2),
- (mask_type V0),
+ (mask_type VMV0:$vm),
(XLenVT timm:$round),
GPR:$vl, sew, (XLenVT timm:$policy))>;
@@ -4534,13 +4534,13 @@ class VPatTernaryMask<string intrinsic,
(result_type result_reg_class:$rs3),
(op1_type op1_reg_class:$rs1),
(op2_type op2_kind:$rs2),
- (mask_type V0),
+ (mask_type VMV0:$vm),
VLOpFrag)),
(!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX # "_MASK")
result_reg_class:$rs3,
(op1_type op1_reg_class:$rs1),
op2_kind:$rs2,
- (mask_type V0),
+ (mask_type VMV0:$vm),
GPR:$vl, sew)>;
class VPatTernaryMaskPolicy<string intrinsic,
@@ -4559,13 +4559,13 @@ class VPatTernaryMaskPolicy<string intrinsic,
(result_type result_reg_class:$rs3),
(op1_type op1_reg_class:$rs1),
(op2_type op2_kind:$rs2),
- (mask_type V0),
+ (mask_type VMV0:$vm),
VLOpFrag, (XLenVT timm:$policy))),
(!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX # "_MASK")
result_reg_class:$rs3,
(op1_type op1_reg_class:$rs1),
op2_kind:$rs2,
- (mask_type V0),
+ (mask_type VMV0:$vm),
GPR:$vl, sew, (XLenVT timm:$policy))>;
class VPatTernaryMaskPolicyRoundingMode<string intrinsic,
@@ -4584,14 +4584,14 @@ class VPatTernaryMaskPolicyRoundingMode<string intrinsic,
(result_type result_reg_class:$rs3),
(op1_type op1_reg_class:$rs1),
(op2_type op2_kind:$rs2),
- (mask_type V0),
+ (mask_type VMV0:$vm),
(XLenVT timm:$round),
VLOpFrag, (XLenVT timm:$policy))),
(!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX # "_MASK")
result_reg_class:$rs3,
(op1_type op1_reg_class:$rs1),
op2_kind:$rs2,
- (mask_type V0),
+ (mask_type VMV0:$vm),
(XLenVT timm:$round),
GPR:$vl, sew, (XLenVT timm:$policy))>;
@@ -4611,13 +4611,13 @@ class VPatTernaryMaskTA<string intrinsic,
(result_type result_reg_class:$rs3),
(op1_type op1_reg_class:$rs1),
(op2_type op2_kind:$rs2),
- (mask_type V0),
+ (mask_type VMV0:$vm),
VLOpFrag)),
(!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, log2sew)# "_MASK")
result_reg_class:$rs3,
(op1_type op1_reg_class:$rs1),
op2_kind:$rs2,
- (mask_type V0),
+ (mask_type VMV0:$vm),
GPR:$vl, log2sew, TAIL_AGNOSTIC)>;
class VPatTernaryMaskTARoundingMode<string intrinsic,
@@ -4636,14 +4636,14 @@ class VPatTernaryMaskTARoundingMode<string intrinsic,
(result_type result_reg_class:$rs3),
(op1_type op1_reg_class:$rs1),
(op2_type op2_kind:$rs2),
- (mask_type V0),
+ (mask_type VMV0:$vm),
(XLenVT timm:$round),
VLOpFrag)),
(!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, log2sew)# "_MASK")
result_reg_class:$rs3,
(op1_type op1_reg_class:$rs1),
op2_kind:$rs2,
- (mask_type V0),
+ (mask_type VMV0:$vm),
(XLenVT timm:$round),
GPR:$vl, log2sew, TAIL_AGNOSTIC)>;
@@ -4655,9 +4655,9 @@ multiclass VPatUnaryS_M<string intrinsic_name,
(!cast<Instruction>(inst#"_M_"#mti.BX) $rs1,
GPR:$vl, mti.Log2SEW)>;
def : Pat<(XLenVT (!cast<Intrinsic>(intrinsic_name # "_mask")
- (mti.Mask VR:$rs1), (mti.Mask V0), VLOpFrag)),
+ (mti.Mask VR:$rs1), (mti.Mask VMV0:$vm), VLOpFrag)),
(!cast<Instruction>(inst#"_M_"#mti.BX#"_MASK") $rs1,
- (mti.Mask V0), GPR:$vl, mti.Log2SEW)>;
+ (mti.Mask VMV0:$vm), GPR:$vl, mti.Log2SEW)>;
}
}
@@ -4745,9 +4745,9 @@ multiclass VPatNullaryV<string intrinsic, string instruction> {
vti.RegClass:$merge, GPR:$vl, vti.Log2SEW, TU_MU)>;
def : Pat<(vti.Vector (!cast<Intrinsic>(intrinsic # "_mask")
(vti.Vector vti.RegClass:$merge),
- (vti.Mask V0), VLOpFrag, (XLenVT timm:$policy))),
+ (vti.Mask VMV0:$vm), VLOpFrag, (XLenVT timm:$policy))),
(!cast<Instruction>(instruction#"_V_" # vti.LMul.MX # "_MASK")
- vti.RegClass:$merge, (vti.Mask V0),
+ vti.RegClass:$merge, (vti.Mask VMV0:$vm),
GPR:$vl, vti.Log2SEW, (XLenVT timm:$policy))>;
}
}
@@ -4847,13 +4847,13 @@ multiclass VPatBinaryCarryInTAIL<string intrinsic,
(result_type result_reg_class:$merge),
(op1_type op1_reg_class:$rs1),
(op2_type op2_kind:$rs2),
- (mask_type V0),
+ (mask_type VMV0:$vm),
VLOpFrag)),
(!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
(result_type result_reg_class:$merge),
(op1_type op1_reg_class:$rs1),
(op2_type op2_kind:$rs2),
- (mask_type V0), GPR:$vl, sew)>;
+ (mask_type VMV0:$vm), GPR:$vl, sew)>;
}
multiclass VPatBinaryCarryIn<string intrinsic,
@@ -4870,12 +4870,12 @@ multiclass VPatBinaryCarryIn<string intrinsic,
def : Pat<(result_type (!cast<Intrinsic>(intrinsic)
(op1_type op1_reg_class:$rs1),
(op2_type op2_kind:$rs2),
- (mask_type V0),
+ (mask_type VMV0:$vm),
VLOpFrag)),
(!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
(op1_type op1_reg_class:$rs1),
(op2_type op2_kind:$rs2),
- (mask_type V0), GPR:$vl, sew)>;
+ (mask_type VMV0:$vm), GPR:$vl, sew)>;
}
multiclass VPatBinaryMaskOut<string intrinsic,
@@ -6070,10 +6070,10 @@ multiclass VPatCompare_VI<string intrinsic, string inst,
def : Pat<(vti.Mask (IntrMask (vti.Mask VR:$merge),
(vti.Vector vti.RegClass:$rs1),
(vti.Scalar ImmType:$rs2),
- (vti.Mask V0),
+ (vti.Mask VMV0:$vm),
VLOpFrag)),
(PseudoMask VR:$merge, vti.RegClass:$rs1, (DecImm ImmType:$rs2),
- (vti.Mask V0), GPR:$vl, vti.Log2SEW)>;
+ (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW)>;
}
}
@@ -6230,14 +6230,14 @@ foreach vti = AllIntegerVectors in {
def : Pat<(vti.Vector (int_riscv_vrsub_mask (vti.Vector vti.RegClass:$merge),
(vti.Vector vti.RegClass:$rs2),
(vti.Vector vti.RegClass:$rs1),
- (vti.Mask V0),
+ (vti.Mask VMV0:$vm),
VLOpFrag,
(XLenVT timm:$policy))),
(!cast<Instruction>("PseudoVSUB_VV_"#vti.LMul.MX#"_MASK")
vti.RegClass:$merge,
vti.RegClass:$rs1,
vti.RegClass:$rs2,
- (vti.Mask V0),
+ (vti.Mask VMV0:$vm),
GPR:$vl,
vti.Log2SEW,
(XLenVT timm:$policy))>;
@@ -6255,14 +6255,14 @@ foreach vti = AllIntegerVectors in {
def : Pat<(vti.Vector (int_riscv_vsub_mask (vti.Vector vti.RegClass:$merge),
(vti.Vector vti.RegClass:$rs1),
(vti.Scalar simm5_plus1:$rs2),
- (vti.Mask V0),
+ (vti.Mask VMV0:$vm),
VLOpFrag,
(XLenVT timm:$policy))),
(!cast<Instruction>("PseudoVADD_VI_"#vti.LMul.MX#"_MASK")
vti.RegClass:$merge,
vti.RegClass:$rs1,
(NegImm simm5_plus1:$rs2),
- (vti.Mask V0),
+ (vti.Mask VMV0:$vm),
GPR:$vl,
vti.Log2SEW,
(XLenVT timm:$policy))>;
@@ -6918,14 +6918,14 @@ foreach vti = AllIntegerVectors in {
def : Pat<(vti.Vector (int_riscv_vsll_mask (vti.Vector vti.RegClass:$merge),
(vti.Vector vti.RegClass:$rs1),
(XLenVT 1),
- (vti.Mask V0),
+ (vti.Mask VMV0:$vm),
VLOpFrag,
(XLenVT timm:$policy))),
(!cast<Instruction>("PseudoVADD_VV_"#vti.LMul.MX#"_MASK")
vti.RegClass:$merge,
vti.RegClass:$rs1,
vti.RegClass:$rs1,
- (vti.Mask V0),
+ (vti.Mask VMV0:$vm),
GPR:$vl,
vti.Log2SEW,
(XLenVT timm:$policy))>;
@@ -7249,9 +7249,9 @@ foreach fvti = AllFloatVectors in {
def : Pat<(fvti.Vector (int_riscv_vfmerge (fvti.Vector fvti.RegClass:$merge),
(fvti.Vector fvti.RegClass:$rs2),
(fvti.Scalar (fpimm0)),
- (fvti.Mask V0), VLOpFrag)),
+ (fvti.Mask VMV0:$vm), VLOpFrag)),
(instr fvti.RegClass:$merge, fvti.RegClass:$rs2, 0,
- (fvti.Mask V0), GPR:$vl, fvti.Log2SEW)>;
+ (fvti.Mask VMV0:$vm), GPR:$vl, fvti.Log2SEW)>;
}
//===----------------------------------------------------------------------===//
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
index b7c845703794..8d99ff57630a 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
@@ -1102,24 +1102,24 @@ defm : VPatWidenMulAddSDNode_VX<zext_oneuse, sext_oneuse, "PseudoVWMACCUS">;
// 11.15. Vector Integer Merge Instructions
foreach vti = AllIntegerVectors in {
let Predicates = GetVTypePredicates<vti>.Predicates in {
- def : Pat<(vti.Vector (vselect (vti.Mask V0), vti.RegClass:$rs1,
+ def : Pat<(vti.Vector (vselect (vti.Mask VMV0:$vm), vti.RegClass:$rs1,
vti.RegClass:$rs2)),
(!cast<Instruction>("PseudoVMERGE_VVM_"#vti.LMul.MX)
(vti.Vector (IMPLICIT_DEF)),
- vti.RegClass:$rs2, vti.RegClass:$rs1, (vti.Mask V0),
+ vti.RegClass:$rs2, vti.RegClass:$rs1, (vti.Mask VMV0:$vm),
vti.AVL, vti.Log2SEW)>;
- def : Pat<(vti.Vector (vselect (vti.Mask V0), (SplatPat XLenVT:$rs1),
+ def : Pat<(vti.Vector (vselect (vti.Mask VMV0:$vm), (SplatPat XLenVT:$rs1),
vti.RegClass:$rs2)),
(!cast<Instruction>("PseudoVMERGE_VXM_"#vti.LMul.MX)
(vti.Vector (IMPLICIT_DEF)),
- vti.RegClass:$rs2, GPR:$rs1, (vti.Mask V0), vti.AVL, vti.Log2SEW)>;
+ vti.RegClass:$rs2, GPR:$rs1, (vti.Mask VMV0:$vm), vti.AVL, vti.Log2SEW)>;
- def : Pat<(vti.Vector (vselect (vti.Mask V0), (SplatPat_simm5 simm5:$rs1),
+ def : Pat<(vti.Vector (vselect (vti.Mask VMV0:$vm), (SplatPat_simm5 simm5:$rs1),
vti.RegClass:$rs2)),
(!cast<Instruction>("PseudoVMERGE_VIM_"#vti.LMul.MX)
(vti.Vector (IMPLICIT_DEF)),
- vti.RegClass:$rs2, simm5:$rs1, (vti.Mask V0), vti.AVL, vti.Log2SEW)>;
+ vti.RegClass:$rs2, simm5:$rs1, (vti.Mask VMV0:$vm), vti.AVL, vti.Log2SEW)>;
}
}
@@ -1360,30 +1360,30 @@ defm : VPatFPSetCCSDNode_VV_VF_FV<SETOLE, "PseudoVMFLE", "PseudoVMFGE">;
foreach fvti = AllFloatVectors in {
defvar ivti = GetIntVTypeInfo<fvti>.Vti;
let Predicates = GetVTypePredicates<ivti>.Predicates in {
- def : Pat<(fvti.Vector (vselect (fvti.Mask V0), fvti.RegClass:$rs1,
+ def : Pat<(fvti.Vector (vselect (fvti.Mask VMV0:$vm), fvti.RegClass:$rs1,
fvti.RegClass:$rs2)),
(!cast<Instruction>("PseudoVMERGE_VVM_"#fvti.LMul.MX)
(fvti.Vector (IMPLICIT_DEF)),
- fvti.RegClass:$rs2, fvti.RegClass:$rs1, (fvti.Mask V0),
+ fvti.RegClass:$rs2, fvti.RegClass:$rs1, (fvti.Mask VMV0:$vm),
fvti.AVL, fvti.Log2SEW)>;
- def : Pat<(fvti.Vector (vselect (fvti.Mask V0),
+ def : Pat<(fvti.Vector (vselect (fvti.Mask VMV0:$vm),
(SplatFPOp (fvti.Scalar fpimm0)),
fvti.RegClass:$rs2)),
(!cast<Instruction>("PseudoVMERGE_VIM_"#fvti.LMul.MX)
(fvti.Vector (IMPLICIT_DEF)),
- fvti.RegClass:$rs2, 0, (fvti.Mask V0), fvti.AVL, fvti.Log2SEW)>;
+ fvti.RegClass:$rs2, 0, (fvti.Mask VMV0:$vm), fvti.AVL, fvti.Log2SEW)>;
}
let Predicates = GetVTypePredicates<fvti>.Predicates in
- def : Pat<(fvti.Vector (vselect (fvti.Mask V0),
+ def : Pat<(fvti.Vector (vselect (fvti.Mask VMV0:$vm),
(SplatFPOp fvti.ScalarRegClass:$rs1),
fvti.RegClass:$rs2)),
(!cast<Instruction>("PseudoVFMERGE_V"#fvti.ScalarSuffix#"M_"#fvti.LMul.MX)
(fvti.Vector (IMPLICIT_DEF)),
fvti.RegClass:$rs2,
(fvti.Scalar fvti.ScalarRegClass:$rs1),
- (fvti.Mask V0), fvti.AVL, fvti.Log2SEW)>;
+ (fvti.Mask VMV0:$vm), fvti.AVL, fvti.Log2SEW)>;
}
// 13.17. Vector Single-Width Floating-Point/Integer Type-Convert Instructions
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
index dc6b57fad321..bd9dbfa9f1c3 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
@@ -612,7 +612,7 @@ class VPatBinaryVL_V<SDPatternOperator vop,
(op1_type op1_reg_class:$rs1),
(op2_type op2_reg_class:$rs2),
(result_type result_reg_class:$merge),
- (mask_type V0),
+ (mask_type VMV0:$vm),
VLOpFrag)),
(!cast<Instruction>(
!if(isSEWAware,
@@ -621,7 +621,7 @@ class VPatBinaryVL_V<SDPatternOperator vop,
result_reg_class:$merge,
op1_reg_class:$rs1,
op2_reg_class:$rs2,
- (mask_type V0), GPR:$vl, log2sew, TAIL_AGNOSTIC)>;
+ (mask_type VMV0:$vm), GPR:$vl, log2sew, TAIL_AGNOSTIC)>;
class VPatBinaryVL_V_RM<SDPatternOperator vop,
string instruction_name,
@@ -640,7 +640,7 @@ class VPatBinaryVL_V_RM<SDPatternOperator vop,
(op1_type op1_reg_class:$rs1),
(op2_type op2_reg_class:$rs2),
(result_type result_reg_class:$merge),
- (mask_type V0),
+ (mask_type VMV0:$vm),
VLOpFrag)),
(!cast<Instruction>(
!if(isSEWAware,
@@ -649,7 +649,7 @@ class VPatBinaryVL_V_RM<SDPatternOperator vop,
result_reg_class:$merge,
op1_reg_class:$rs1,
op2_reg_class:$rs2,
- (mask_type V0),
+ (mask_type VMV0:$vm),
// Value to indicate no rounding mode change in
// RISCVInsertReadWriteCSR
FRM_DYN,
@@ -747,7 +747,7 @@ class VPatBinaryVL_XI<SDPatternOperator vop,
(vop1_type vop_reg_class:$rs1),
(vop2_type (SplatPatKind (XLenVT xop_kind:$rs2))),
(result_type result_reg_class:$merge),
- (mask_type V0),
+ (mask_type VMV0:$vm),
VLOpFrag)),
(!cast<Instruction>(
!if(isSEWAware,
@@ -756,7 +756,7 @@ class VPatBinaryVL_XI<SDPatternOperator vop,
result_reg_class:$merge,
vop_reg_class:$rs1,
xop_kind:$rs2,
- (mask_type V0), GPR:$vl, log2sew, TAIL_AGNOSTIC)>;
+ (mask_type VMV0:$vm), GPR:$vl, log2sew, TAIL_AGNOSTIC)>;
multiclass VPatBinaryVL_VV_VX<SDPatternOperator vop, string instruction_name,
list<VTypeInfo> vtilist = AllIntegerVectors,
@@ -867,7 +867,7 @@ class VPatBinaryVL_VF<SDPatternOperator vop,
: Pat<(result_type (vop (vop1_type vop_reg_class:$rs1),
(vop2_type (SplatFPOp scalar_reg_class:$rs2)),
(result_type result_reg_class:$merge),
- (mask_type V0),
+ (mask_type VMV0:$vm),
VLOpFrag)),
(!cast<Instruction>(
!if(isSEWAware,
@@ -876,7 +876,7 @@ class VPatBinaryVL_VF<SDPatternOperator vop,
result_reg_class:$merge,
vop_reg_class:$rs1,
scalar_reg_class:$rs2,
- (mask_type V0), GPR:$vl, log2sew, TAIL_AGNOSTIC)>;
+ (mask_type VMV0:$vm), GPR:$vl, log2sew, TAIL_AGNOSTIC)>;
class VPatBinaryVL_VF_RM<SDPatternOperator vop,
string instruction_name,
@@ -893,7 +893,7 @@ class VPatBinaryVL_VF_RM<SDPatternOperator vop,
: Pat<(result_type (vop (vop1_type vop_reg_class:$rs1),
(vop2_type (SplatFPOp scalar_reg_class:$rs2)),
(result_type result_reg_class:$merge),
- (mask_type V0),
+ (mask_type VMV0:$vm),
VLOpFrag)),
(!cast<Instruction>(
!if(isSEWAware,
@@ -902,7 +902,7 @@ class VPatBinaryVL_VF_RM<SDPatternOperator vop,
result_reg_class:$merge,
vop_reg_class:$rs1,
scalar_reg_class:$rs2,
- (mask_type V0),
+ (mask_type VMV0:$vm),
// Value to indicate no rounding mode change in
// RISCVInsertReadWriteCSR
FRM_DYN,
@@ -947,7 +947,7 @@ multiclass VPatBinaryFPVL_R_VF<SDPatternOperator vop, string instruction_name,
def : Pat<(fvti.Vector (vop (SplatFPOp fvti.ScalarRegClass:$rs2),
fvti.RegClass:$rs1,
(fvti.Vector fvti.RegClass:$merge),
- (fvti.Mask V0),
+ (fvti.Mask VMV0:$vm),
VLOpFrag)),
(!cast<Instruction>(
!if(isSEWAware,
@@ -955,7 +955,7 @@ multiclass VPatBinaryFPVL_R_VF<SDPatternOperator vop, string instruction_name,
instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_MASK"))
fvti.RegClass:$merge,
fvti.RegClass:$rs1, fvti.ScalarRegClass:$rs2,
- (fvti.Mask V0), GPR:$vl, fvti.Log2SEW, TAIL_AGNOSTIC)>;
+ (fvti.Mask VMV0:$vm), GPR:$vl, fvti.Log2SEW, TAIL_AGNOSTIC)>;
}
}
@@ -966,7 +966,7 @@ multiclass VPatBinaryFPVL_R_VF_RM<SDPatternOperator vop, string instruction_name
def : Pat<(fvti.Vector (vop (SplatFPOp fvti.ScalarRegClass:$rs2),
fvti.RegClass:$rs1,
(fvti.Vector fvti.RegClass:$merge),
- (fvti.Mask V0),
+ (fvti.Mask VMV0:$vm),
VLOpFrag)),
(!cast<Instruction>(
!if(isSEWAware,
@@ -974,7 +974,7 @@ multiclass VPatBinaryFPVL_R_VF_RM<SDPatternOperator vop, string instruction_name
instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_MASK"))
fvti.RegClass:$merge,
fvti.RegClass:$rs1, fvti.ScalarRegClass:$rs2,
- (fvti.Mask V0),
+ (fvti.Mask VMV0:$vm),
// Value to indicate no rounding mode change in
// RISCVInsertReadWriteCSR
FRM_DYN,
@@ -987,13 +987,13 @@ multiclass VPatIntegerSetCCVL_VV<VTypeInfo vti, string instruction_name,
def : Pat<(vti.Mask (riscv_setcc_vl (vti.Vector vti.RegClass:$rs1),
vti.RegClass:$rs2, cc,
VR:$merge,
- (vti.Mask V0),
+ (vti.Mask VMV0:$vm),
VLOpFrag)),
(!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX#"_MASK")
VR:$merge,
vti.RegClass:$rs1,
vti.RegClass:$rs2,
- (vti.Mask V0), GPR:$vl, vti.Log2SEW)>;
+ (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW)>;
}
// Inherits from VPatIntegerSetCCVL_VV and adds a pattern with operands swapped.
@@ -1003,11 +1003,11 @@ multiclass VPatIntegerSetCCVL_VV_Swappable<VTypeInfo vti, string instruction_nam
def : Pat<(vti.Mask (riscv_setcc_vl (vti.Vector vti.RegClass:$rs2),
vti.RegClass:$rs1, invcc,
VR:$merge,
- (vti.Mask V0),
+ (vti.Mask VMV0:$vm),
VLOpFrag)),
(!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX#"_MASK")
VR:$merge, vti.RegClass:$rs1,
- vti.RegClass:$rs2, (vti.Mask V0), GPR:$vl, vti.Log2SEW)>;
+ vti.RegClass:$rs2, (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW)>;
}
multiclass VPatIntegerSetCCVL_VX_Swappable<VTypeInfo vti, string instruction_name,
@@ -1016,17 +1016,17 @@ multiclass VPatIntegerSetCCVL_VX_Swappable<VTypeInfo vti, string instruction_nam
def : Pat<(vti.Mask (riscv_setcc_vl (vti.Vector vti.RegClass:$rs1),
(SplatPat (XLenVT GPR:$rs2)), cc,
VR:$merge,
- (vti.Mask V0),
+ (vti.Mask VMV0:$vm),
VLOpFrag)),
(instruction_masked VR:$merge, vti.RegClass:$rs1,
- GPR:$rs2, (vti.Mask V0), GPR:$vl, vti.Log2SEW)>;
+ GPR:$rs2, (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW)>;
def : Pat<(vti.Mask (riscv_setcc_vl (SplatPat (XLenVT GPR:$rs2)),
(vti.Vector vti.RegClass:$rs1), invcc,
VR:$merge,
- (vti.Mask V0),
+ (vti.Mask VMV0:$vm),
VLOpFrag)),
(instruction_masked VR:$merge, vti.RegClass:$rs1,
- GPR:$rs2, (vti.Mask V0), GPR:$vl, vti.Log2SEW)>;
+ GPR:$rs2, (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW)>;
}
multiclass VPatIntegerSetCCVL_VI_Swappable<VTypeInfo vti, string instruction_name,
@@ -1035,20 +1035,20 @@ multiclass VPatIntegerSetCCVL_VI_Swappable<VTypeInfo vti, string instruction_nam
def : Pat<(vti.Mask (riscv_setcc_vl (vti.Vector vti.RegClass:$rs1),
(SplatPat_simm5 simm5:$rs2), cc,
VR:$merge,
- (vti.Mask V0),
+ (vti.Mask VMV0:$vm),
VLOpFrag)),
(instruction_masked VR:$merge, vti.RegClass:$rs1,
- XLenVT:$rs2, (vti.Mask V0), GPR:$vl,
+ XLenVT:$rs2, (vti.Mask VMV0:$vm), GPR:$vl,
vti.Log2SEW)>;
// FIXME: Can do some canonicalization to remove these patterns.
def : Pat<(vti.Mask (riscv_setcc_vl (SplatPat_simm5 simm5:$rs2),
(vti.Vector vti.RegClass:$rs1), invcc,
VR:$merge,
- (vti.Mask V0),
+ (vti.Mask VMV0:$vm),
VLOpFrag)),
(instruction_masked VR:$merge, vti.RegClass:$rs1,
- simm5:$rs2, (vti.Mask V0), GPR:$vl,
+ simm5:$rs2, (vti.Mask VMV0:$vm), GPR:$vl,
vti.Log2SEW)>;
}
@@ -1060,20 +1060,20 @@ multiclass VPatIntegerSetCCVL_VIPlus1_Swappable<VTypeInfo vti,
def : Pat<(vti.Mask (riscv_setcc_vl (vti.Vector vti.RegClass:$rs1),
(splatpat_kind simm5:$rs2), cc,
VR:$merge,
- (vti.Mask V0),
+ (vti.Mask VMV0:$vm),
VLOpFrag)),
(instruction_masked VR:$merge, vti.RegClass:$rs1,
- (DecImm simm5:$rs2), (vti.Mask V0), GPR:$vl,
+ (DecImm simm5:$rs2), (vti.Mask VMV0:$vm), GPR:$vl,
vti.Log2SEW)>;
// FIXME: Can do some canonicalization to remove these patterns.
def : Pat<(vti.Mask (riscv_setcc_vl (splatpat_kind simm5:$rs2),
(vti.Vector vti.RegClass:$rs1), invcc,
VR:$merge,
- (vti.Mask V0),
+ (vti.Mask VMV0:$vm),
VLOpFrag)),
(instruction_masked VR:$merge, vti.RegClass:$rs1,
- (DecImm simm5:$rs2), (vti.Mask V0), GPR:$vl,
+ (DecImm simm5:$rs2), (vti.Mask VMV0:$vm), GPR:$vl,
vti.Log2SEW)>;
}
@@ -1086,31 +1086,31 @@ multiclass VPatFPSetCCVL_VV_VF_FV<SDPatternOperator vop, CondCode cc,
fvti.RegClass:$rs2,
cc,
VR:$merge,
- (fvti.Mask V0),
+ (fvti.Mask VMV0:$vm),
VLOpFrag)),
(!cast<Instruction>(inst_name#"_VV_"#fvti.LMul.MX#"_MASK")
VR:$merge, fvti.RegClass:$rs1,
- fvti.RegClass:$rs2, (fvti.Mask V0),
+ fvti.RegClass:$rs2, (fvti.Mask VMV0:$vm),
GPR:$vl, fvti.Log2SEW)>;
def : Pat<(fvti.Mask (vop (fvti.Vector fvti.RegClass:$rs1),
(SplatFPOp fvti.ScalarRegClass:$rs2),
cc,
VR:$merge,
- (fvti.Mask V0),
+ (fvti.Mask VMV0:$vm),
VLOpFrag)),
(!cast<Instruction>(inst_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_MASK")
VR:$merge, fvti.RegClass:$rs1,
- fvti.ScalarRegClass:$rs2, (fvti.Mask V0),
+ fvti.ScalarRegClass:$rs2, (fvti.Mask VMV0:$vm),
GPR:$vl, fvti.Log2SEW)>;
def : Pat<(fvti.Mask (vop (SplatFPOp fvti.ScalarRegClass:$rs2),
(fvti.Vector fvti.RegClass:$rs1),
cc,
VR:$merge,
- (fvti.Mask V0),
+ (fvti.Mask VMV0:$vm),
VLOpFrag)),
(!cast<Instruction>(swapped_op_inst_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_MASK")
VR:$merge, fvti.RegClass:$rs1,
- fvti.ScalarRegClass:$rs2, (fvti.Mask V0),
+ fvti.ScalarRegClass:$rs2, (fvti.Mask VMV0:$vm),
GPR:$vl, fvti.Log2SEW)>;
}
}
@@ -1124,11 +1124,11 @@ multiclass VPatExtendVL_V<SDNode vop, string inst_name, string suffix,
let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
GetVTypePredicates<fti>.Predicates) in
def : Pat<(vti.Vector (vop (fti.Vector fti.RegClass:$rs2),
- (fti.Mask V0), VLOpFrag)),
+ (fti.Mask VMV0:$vm), VLOpFrag)),
(!cast<Instruction>(inst_name#"_"#suffix#"_"#vti.LMul.MX#"_MASK")
(vti.Vector (IMPLICIT_DEF)),
fti.RegClass:$rs2,
- (fti.Mask V0), GPR:$vl, vti.Log2SEW, TA_MA)>;
+ (fti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW, TA_MA)>;
}
}
@@ -1140,11 +1140,11 @@ multiclass VPatConvertFP2IVL_V<SDPatternOperator vop, string instruction_name> {
let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
GetVTypePredicates<ivti>.Predicates) in
def : Pat<(ivti.Vector (vop (fvti.Vector fvti.RegClass:$rs1),
- (fvti.Mask V0),
+ (fvti.Mask VMV0:$vm),
VLOpFrag)),
(!cast<Instruction>(instruction_name#"_"#ivti.LMul.MX#"_MASK")
(ivti.Vector (IMPLICIT_DEF)), fvti.RegClass:$rs1,
- (fvti.Mask V0), GPR:$vl, ivti.Log2SEW, TA_MA)>;
+ (fvti.Mask VMV0:$vm), GPR:$vl, ivti.Log2SEW, TA_MA)>;
}
}
@@ -1154,11 +1154,11 @@ multiclass VPatConvertFP2IVL_V_RM<SDPatternOperator vop, string instruction_name
let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
GetVTypePredicates<ivti>.Predicates) in
def : Pat<(ivti.Vector (vop (fvti.Vector fvti.RegClass:$rs1),
- (fvti.Mask V0),
+ (fvti.Mask VMV0:$vm),
VLOpFrag)),
(!cast<Instruction>(instruction_name#"_"#ivti.LMul.MX#"_MASK")
(ivti.Vector (IMPLICIT_DEF)), fvti.RegClass:$rs1,
- (fvti.Mask V0),
+ (fvti.Mask VMV0:$vm),
// Value to indicate no rounding mode change in
// RISCVInsertReadWriteCSR
FRM_DYN,
@@ -1173,11 +1173,11 @@ multiclass VPatConvertFP2I_RM_VL_V<SDPatternOperator vop, string instruction_nam
let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
GetVTypePredicates<ivti>.Predicates) in
def : Pat<(ivti.Vector (vop (fvti.Vector fvti.RegClass:$rs1),
- (fvti.Mask V0), (XLenVT timm:$frm),
+ (fvti.Mask VMV0:$vm), (XLenVT timm:$frm),
VLOpFrag)),
(!cast<Instruction>(instruction_name#"_"#ivti.LMul.MX#"_MASK")
(ivti.Vector (IMPLICIT_DEF)), fvti.RegClass:$rs1,
- (fvti.Mask V0), timm:$frm, GPR:$vl, ivti.Log2SEW,
+ (fvti.Mask VMV0:$vm), timm:$frm, GPR:$vl, ivti.Log2SEW,
TA_MA)>;
}
}
@@ -1188,11 +1188,11 @@ multiclass VPatConvertI2FPVL_V_RM<SDPatternOperator vop, string instruction_name
let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
GetVTypePredicates<ivti>.Predicates) in
def : Pat<(fvti.Vector (vop (ivti.Vector ivti.RegClass:$rs1),
- (ivti.Mask V0),
+ (ivti.Mask VMV0:$vm),
VLOpFrag)),
(!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX#"_MASK")
(fvti.Vector (IMPLICIT_DEF)), ivti.RegClass:$rs1,
- (ivti.Mask V0),
+ (ivti.Mask VMV0:$vm),
// Value to indicate no rounding mode change in
// RISCVInsertReadWriteCSR
FRM_DYN,
@@ -1206,11 +1206,11 @@ multiclass VPatConvertI2FP_RM_VL_V<SDNode vop, string instruction_name> {
let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
GetVTypePredicates<ivti>.Predicates) in
def : Pat<(fvti.Vector (vop (ivti.Vector ivti.RegClass:$rs1),
- (ivti.Mask V0), (XLenVT timm:$frm),
+ (ivti.Mask VMV0:$vm), (XLenVT timm:$frm),
VLOpFrag)),
(!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX#"_MASK")
(fvti.Vector (IMPLICIT_DEF)), ivti.RegClass:$rs1,
- (ivti.Mask V0), timm:$frm, GPR:$vl, fvti.Log2SEW, TA_MA)>;
+ (ivti.Mask VMV0:$vm), timm:$frm, GPR:$vl, fvti.Log2SEW, TA_MA)>;
}
}
@@ -1223,11 +1223,11 @@ multiclass VPatWConvertFP2IVL_V<SDPatternOperator vop, string instruction_name>
let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
GetVTypePredicates<iwti>.Predicates) in
def : Pat<(iwti.Vector (vop (fvti.Vector fvti.RegClass:$rs1),
- (fvti.Mask V0),
+ (fvti.Mask VMV0:$vm),
VLOpFrag)),
(!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX#"_MASK")
(iwti.Vector (IMPLICIT_DEF)), fvti.RegClass:$rs1,
- (fvti.Mask V0), GPR:$vl, fvti.Log2SEW, TA_MA)>;
+ (fvti.Mask VMV0:$vm), GPR:$vl, fvti.Log2SEW, TA_MA)>;
}
}
@@ -1238,11 +1238,11 @@ multiclass VPatWConvertFP2IVL_V_RM<SDPatternOperator vop, string instruction_nam
let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
GetVTypePredicates<iwti>.Predicates) in
def : Pat<(iwti.Vector (vop (fvti.Vector fvti.RegClass:$rs1),
- (fvti.Mask V0),
+ (fvti.Mask VMV0:$vm),
VLOpFrag)),
(!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX#"_MASK")
(iwti.Vector (IMPLICIT_DEF)), fvti.RegClass:$rs1,
- (fvti.Mask V0),
+ (fvti.Mask VMV0:$vm),
// Value to indicate no rounding mode change in
// RISCVInsertReadWriteCSR
FRM_DYN,
@@ -1258,11 +1258,11 @@ multiclass VPatWConvertFP2I_RM_VL_V<SDNode vop, string instruction_name> {
let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
GetVTypePredicates<iwti>.Predicates) in
def : Pat<(iwti.Vector (vop (fvti.Vector fvti.RegClass:$rs1),
- (fvti.Mask V0), (XLenVT timm:$frm),
+ (fvti.Mask VMV0:$vm), (XLenVT timm:$frm),
VLOpFrag)),
(!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX#"_MASK")
(iwti.Vector (IMPLICIT_DEF)), fvti.RegClass:$rs1,
- (fvti.Mask V0), timm:$frm, GPR:$vl, fvti.Log2SEW, TA_MA)>;
+ (fvti.Mask VMV0:$vm), timm:$frm, GPR:$vl, fvti.Log2SEW, TA_MA)>;
}
}
@@ -1274,11 +1274,11 @@ multiclass VPatWConvertI2FPVL_V<SDPatternOperator vop,
let Predicates = !listconcat(GetVTypePredicates<ivti>.Predicates,
GetVTypePredicates<fwti>.Predicates) in
def : Pat<(fwti.Vector (vop (ivti.Vector ivti.RegClass:$rs1),
- (ivti.Mask V0),
+ (ivti.Mask VMV0:$vm),
VLOpFrag)),
(!cast<Instruction>(instruction_name#"_"#ivti.LMul.MX#"_MASK")
(fwti.Vector (IMPLICIT_DEF)), ivti.RegClass:$rs1,
- (ivti.Mask V0),
+ (ivti.Mask VMV0:$vm),
GPR:$vl, ivti.Log2SEW, TA_MA)>;
}
}
@@ -1295,11 +1295,11 @@ multiclass VPatNConvertFP2IVL_W<SDPatternOperator vop,
let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
GetVTypePredicates<fwti>.Predicates) in
def : Pat<(vti.Vector (vop (fwti.Vector fwti.RegClass:$rs1),
- (fwti.Mask V0),
+ (fwti.Mask VMV0:$vm),
VLOpFrag)),
(!cast<Instruction>(instruction_name#"_"#vti.LMul.MX#"_MASK")
(vti.Vector (IMPLICIT_DEF)), fwti.RegClass:$rs1,
- (fwti.Mask V0), GPR:$vl, vti.Log2SEW, TA_MA)>;
+ (fwti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW, TA_MA)>;
}
}
@@ -1313,11 +1313,11 @@ multiclass VPatNConvertFP2IVL_W_RM<SDPatternOperator vop,
let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
GetVTypePredicates<fwti>.Predicates) in
def : Pat<(vti.Vector (vop (fwti.Vector fwti.RegClass:$rs1),
- (fwti.Mask V0),
+ (fwti.Mask VMV0:$vm),
VLOpFrag)),
(!cast<Instruction>(instruction_name#"_"#vti.LMul.MX#"_MASK")
(vti.Vector (IMPLICIT_DEF)), fwti.RegClass:$rs1,
- (fwti.Mask V0),
+ (fwti.Mask VMV0:$vm),
// Value to indicate no rounding mode change in
// RISCVInsertReadWriteCSR
FRM_DYN,
@@ -1332,11 +1332,11 @@ multiclass VPatNConvertFP2I_RM_VL_W<SDNode vop, string instruction_name> {
let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
GetVTypePredicates<fwti>.Predicates) in
def : Pat<(vti.Vector (vop (fwti.Vector fwti.RegClass:$rs1),
- (fwti.Mask V0), (XLenVT timm:$frm),
+ (fwti.Mask VMV0:$vm), (XLenVT timm:$frm),
VLOpFrag)),
(!cast<Instruction>(instruction_name#"_"#vti.LMul.MX#"_MASK")
(vti.Vector (IMPLICIT_DEF)), fwti.RegClass:$rs1,
- (fwti.Mask V0), timm:$frm, GPR:$vl, vti.Log2SEW, TA_MA)>;
+ (fwti.Mask VMV0:$vm), timm:$frm, GPR:$vl, vti.Log2SEW, TA_MA)>;
}
}
@@ -1348,11 +1348,11 @@ multiclass VPatNConvertI2FPVL_W_RM<SDPatternOperator vop,
let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
GetVTypePredicates<iwti>.Predicates) in
def : Pat<(fvti.Vector (vop (iwti.Vector iwti.RegClass:$rs1),
- (iwti.Mask V0),
+ (iwti.Mask VMV0:$vm),
VLOpFrag)),
(!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX#"_MASK")
(fvti.Vector (IMPLICIT_DEF)), iwti.RegClass:$rs1,
- (iwti.Mask V0),
+ (iwti.Mask VMV0:$vm),
// Value to indicate no rounding mode change in
// RISCVInsertReadWriteCSR
FRM_DYN,
@@ -1367,11 +1367,11 @@ multiclass VPatNConvertI2FP_RM_VL_W<SDNode vop, string instruction_name> {
let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
GetVTypePredicates<iwti>.Predicates) in
def : Pat<(fvti.Vector (vop (iwti.Vector iwti.RegClass:$rs1),
- (iwti.Mask V0), (XLenVT timm:$frm),
+ (iwti.Mask VMV0:$vm), (XLenVT timm:$frm),
VLOpFrag)),
(!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX#"_MASK")
(fvti.Vector (IMPLICIT_DEF)), iwti.RegClass:$rs1,
- (iwti.Mask V0), timm:$frm, GPR:$vl, fvti.Log2SEW, TA_MA)>;
+ (iwti.Mask VMV0:$vm), timm:$frm, GPR:$vl, fvti.Log2SEW, TA_MA)>;
}
}
@@ -1381,13 +1381,13 @@ multiclass VPatReductionVL<SDNode vop, string instruction_name, bit is_float> {
let Predicates = GetVTypePredicates<vti>.Predicates in {
def: Pat<(vti_m1.Vector (vop (vti_m1.Vector VR:$merge),
(vti.Vector vti.RegClass:$rs1), VR:$rs2,
- (vti.Mask V0), VLOpFrag,
+ (vti.Mask VMV0:$vm), VLOpFrag,
(XLenVT timm:$policy))),
(!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX#"_E"#vti.SEW#"_MASK")
(vti_m1.Vector VR:$merge),
(vti.Vector vti.RegClass:$rs1),
(vti_m1.Vector VR:$rs2),
- (vti.Mask V0), GPR:$vl, vti.Log2SEW, (XLenVT timm:$policy))>;
+ (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW, (XLenVT timm:$policy))>;
}
}
}
@@ -1398,13 +1398,13 @@ multiclass VPatReductionVL_RM<SDNode vop, string instruction_name, bit is_float>
let Predicates = GetVTypePredicates<vti>.Predicates in {
def: Pat<(vti_m1.Vector (vop (vti_m1.Vector VR:$merge),
(vti.Vector vti.RegClass:$rs1), VR:$rs2,
- (vti.Mask V0), VLOpFrag,
+ (vti.Mask VMV0:$vm), VLOpFrag,
(XLenVT timm:$policy))),
(!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX#"_E"#vti.SEW#"_MASK")
(vti_m1.Vector VR:$merge),
(vti.Vector vti.RegClass:$rs1),
(vti_m1.Vector VR:$rs2),
- (vti.Mask V0),
+ (vti.Mask VMV0:$vm),
// Value to indicate no rounding mode change in
// RISCVInsertReadWriteCSR
FRM_DYN,
@@ -1463,11 +1463,11 @@ multiclass VPatWidenReductionVL<SDNode vop, PatFrags extop, string instruction_n
GetVTypePredicates<wti>.Predicates) in {
def: Pat<(wti_m1.Vector (vop (wti_m1.Vector VR:$merge),
(wti.Vector (extop (vti.Vector vti.RegClass:$rs1))),
- VR:$rs2, (vti.Mask V0), VLOpFrag,
+ VR:$rs2, (vti.Mask VMV0:$vm), VLOpFrag,
(XLenVT timm:$policy))),
(!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX#"_E"#vti.SEW#"_MASK")
(wti_m1.Vector VR:$merge), (vti.Vector vti.RegClass:$rs1),
- (wti_m1.Vector VR:$rs2), (vti.Mask V0), GPR:$vl, vti.Log2SEW,
+ (wti_m1.Vector VR:$rs2), (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW,
(XLenVT timm:$policy))>;
}
}
@@ -1482,11 +1482,11 @@ multiclass VPatWidenReductionVL_RM<SDNode vop, PatFrags extop, string instructio
GetVTypePredicates<wti>.Predicates) in {
def: Pat<(wti_m1.Vector (vop (wti_m1.Vector VR:$merge),
(wti.Vector (extop (vti.Vector vti.RegClass:$rs1))),
- VR:$rs2, (vti.Mask V0), VLOpFrag,
+ VR:$rs2, (vti.Mask VMV0:$vm), VLOpFrag,
(XLenVT timm:$policy))),
(!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX#"_E"#vti.SEW#"_MASK")
(wti_m1.Vector VR:$merge), (vti.Vector vti.RegClass:$rs1),
- (wti_m1.Vector VR:$rs2), (vti.Mask V0),
+ (wti_m1.Vector VR:$rs2), (vti.Mask VMV0:$vm),
// Value to indicate no rounding mode change in
// RISCVInsertReadWriteCSR
FRM_DYN,
@@ -1505,11 +1505,11 @@ multiclass VPatWidenReductionVL_Ext_VL<SDNode vop, PatFrags extop, string instru
GetVTypePredicates<wti>.Predicates) in {
def: Pat<(wti_m1.Vector (vop (wti_m1.Vector VR:$merge),
(wti.Vector (extop (vti.Vector vti.RegClass:$rs1), (vti.Mask true_mask), VLOpFrag)),
- VR:$rs2, (vti.Mask V0), VLOpFrag,
+ VR:$rs2, (vti.Mask VMV0:$vm), VLOpFrag,
(XLenVT timm:$policy))),
(!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX#"_E"#vti.SEW#"_MASK")
(wti_m1.Vector VR:$merge), (vti.Vector vti.RegClass:$rs1),
- (wti_m1.Vector VR:$rs2), (vti.Mask V0), GPR:$vl, vti.Log2SEW,
+ (wti_m1.Vector VR:$rs2), (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW,
(XLenVT timm:$policy))>;
}
}
@@ -1524,11 +1524,11 @@ multiclass VPatWidenReductionVL_Ext_VL_RM<SDNode vop, PatFrags extop, string ins
GetVTypePredicates<wti>.Predicates) in {
def: Pat<(wti_m1.Vector (vop (wti_m1.Vector VR:$merge),
(wti.Vector (extop (vti.Vector vti.RegClass:$rs1), (vti.Mask true_mask), VLOpFrag)),
- VR:$rs2, (vti.Mask V0), VLOpFrag,
+ VR:$rs2, (vti.Mask VMV0:$vm), VLOpFrag,
(XLenVT timm:$policy))),
(!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX#"_E"#vti.SEW#"_MASK")
(wti_m1.Vector VR:$merge), (vti.Vector vti.RegClass:$rs1),
- (wti_m1.Vector VR:$rs2), (vti.Mask V0),
+ (wti_m1.Vector VR:$rs2), (vti.Mask VMV0:$vm),
// Value to indicate no rounding mode change in
// RISCVInsertReadWriteCSR
FRM_DYN,
@@ -1651,10 +1651,10 @@ multiclass VPatNarrowShiftExtVL_WV<SDNode op, PatFrags extop, string instruction
(wti.Vector (extop (vti.Vector vti.RegClass:$rs1),
(vti.Mask true_mask), VLOpFrag)),
srcvalue, (vti.Mask true_mask), VLOpFrag),
- (vti.Mask V0), VLOpFrag)),
+ (vti.Mask VMV0:$vm), VLOpFrag)),
(!cast<Instruction>(instruction_name#"_WV_"#vti.LMul.MX#"_MASK")
(vti.Vector (IMPLICIT_DEF)), wti.RegClass:$rs2, vti.RegClass:$rs1,
- (vti.Mask V0), GPR:$vl, vti.Log2SEW, TA_MA)>;
+ (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW, TA_MA)>;
}
}
@@ -1697,7 +1697,7 @@ multiclass VPatMultiplyAccVL_VV_VX<PatFrag op, string instruction_name> {
foreach vti = AllIntegerVectors in {
defvar suffix = vti.LMul.MX;
let Predicates = GetVTypePredicates<vti>.Predicates in {
- def : Pat<(riscv_vp_merge_vl (vti.Mask V0),
+ def : Pat<(riscv_vp_merge_vl (vti.Mask VMV0:$vm),
(vti.Vector (op vti.RegClass:$rd,
(riscv_mul_vl_oneuse vti.RegClass:$rs1, vti.RegClass:$rs2,
srcvalue, (vti.Mask true_mask), VLOpFrag),
@@ -1705,8 +1705,8 @@ multiclass VPatMultiplyAccVL_VV_VX<PatFrag op, string instruction_name> {
vti.RegClass:$rd, VLOpFrag),
(!cast<Instruction>(instruction_name#"_VV_"# suffix #"_MASK")
vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
- (vti.Mask V0), GPR:$vl, vti.Log2SEW, TU_MU)>;
- def : Pat<(riscv_vp_merge_vl (vti.Mask V0),
+ (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW, TU_MU)>;
+ def : Pat<(riscv_vp_merge_vl (vti.Mask VMV0:$vm),
(vti.Vector (op vti.RegClass:$rd,
(riscv_mul_vl_oneuse (SplatPat XLenVT:$rs1), vti.RegClass:$rs2,
srcvalue, (vti.Mask true_mask), VLOpFrag),
@@ -1714,8 +1714,8 @@ multiclass VPatMultiplyAccVL_VV_VX<PatFrag op, string instruction_name> {
vti.RegClass:$rd, VLOpFrag),
(!cast<Instruction>(instruction_name#"_VX_"# suffix #"_MASK")
vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
- (vti.Mask V0), GPR:$vl, vti.Log2SEW, TU_MU)>;
- def : Pat<(riscv_vselect_vl (vti.Mask V0),
+ (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW, TU_MU)>;
+ def : Pat<(riscv_vselect_vl (vti.Mask VMV0:$vm),
(vti.Vector (op vti.RegClass:$rd,
(riscv_mul_vl_oneuse vti.RegClass:$rs1, vti.RegClass:$rs2,
srcvalue, (vti.Mask true_mask), VLOpFrag),
@@ -1723,8 +1723,8 @@ multiclass VPatMultiplyAccVL_VV_VX<PatFrag op, string instruction_name> {
vti.RegClass:$rd, VLOpFrag),
(!cast<Instruction>(instruction_name#"_VV_"# suffix #"_MASK")
vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
- (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
- def : Pat<(riscv_vselect_vl (vti.Mask V0),
+ (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
+ def : Pat<(riscv_vselect_vl (vti.Mask VMV0:$vm),
(vti.Vector (op vti.RegClass:$rd,
(riscv_mul_vl_oneuse (SplatPat XLenVT:$rs1), vti.RegClass:$rs2,
srcvalue, (vti.Mask true_mask), VLOpFrag),
@@ -1732,7 +1732,7 @@ multiclass VPatMultiplyAccVL_VV_VX<PatFrag op, string instruction_name> {
vti.RegClass:$rd, VLOpFrag),
(!cast<Instruction>(instruction_name#"_VX_"# suffix #"_MASK")
vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
- (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
+ (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
}
}
}
@@ -1746,17 +1746,17 @@ multiclass VPatWidenMultiplyAddVL_VV_VX<SDNode vwmacc_op, string instr_name> {
def : Pat<(vwmacc_op (vti.Vector vti.RegClass:$rs1),
(vti.Vector vti.RegClass:$rs2),
(wti.Vector wti.RegClass:$rd),
- (vti.Mask V0), VLOpFrag),
+ (vti.Mask VMV0:$vm), VLOpFrag),
(!cast<Instruction>(instr_name#"_VV_"#vti.LMul.MX#"_MASK")
wti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
- (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
+ (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
def : Pat<(vwmacc_op (SplatPat XLenVT:$rs1),
(vti.Vector vti.RegClass:$rs2),
(wti.Vector wti.RegClass:$rd),
- (vti.Mask V0), VLOpFrag),
+ (vti.Mask VMV0:$vm), VLOpFrag),
(!cast<Instruction>(instr_name#"_VX_"#vti.LMul.MX#"_MASK")
wti.RegClass:$rd, vti.ScalarRegClass:$rs1,
- vti.RegClass:$rs2, (vti.Mask V0), GPR:$vl, vti.Log2SEW,
+ vti.RegClass:$rs2, (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW,
TAIL_AGNOSTIC)>;
}
}
@@ -1789,19 +1789,19 @@ multiclass VPatFPMulAddVL_VV_VF<SDPatternOperator vop, string instruction_name>
defvar suffix = vti.LMul.MX;
let Predicates = GetVTypePredicates<vti>.Predicates in {
def : Pat<(vti.Vector (vop vti.RegClass:$rs1, vti.RegClass:$rd,
- vti.RegClass:$rs2, (vti.Mask V0),
+ vti.RegClass:$rs2, (vti.Mask VMV0:$vm),
VLOpFrag)),
(!cast<Instruction>(instruction_name#"_VV_"# suffix #"_MASK")
vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
- (vti.Mask V0), GPR:$vl, vti.Log2SEW, TA_MA)>;
+ (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW, TA_MA)>;
def : Pat<(vti.Vector (vop (SplatFPOp vti.ScalarRegClass:$rs1),
vti.RegClass:$rd, vti.RegClass:$rs2,
- (vti.Mask V0),
+ (vti.Mask VMV0:$vm),
VLOpFrag)),
(!cast<Instruction>(instruction_name#"_V" # vti.ScalarSuffix # "_" # suffix # "_MASK")
vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
- (vti.Mask V0), GPR:$vl, vti.Log2SEW, TA_MA)>;
+ (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW, TA_MA)>;
}
}
}
@@ -1811,11 +1811,11 @@ multiclass VPatFPMulAddVL_VV_VF_RM<SDPatternOperator vop, string instruction_nam
defvar suffix = vti.LMul.MX;
let Predicates = GetVTypePredicates<vti>.Predicates in {
def : Pat<(vti.Vector (vop vti.RegClass:$rs1, vti.RegClass:$rd,
- vti.RegClass:$rs2, (vti.Mask V0),
+ vti.RegClass:$rs2, (vti.Mask VMV0:$vm),
VLOpFrag)),
(!cast<Instruction>(instruction_name#"_VV_"# suffix #"_MASK")
vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
- (vti.Mask V0),
+ (vti.Mask VMV0:$vm),
// Value to indicate no rounding mode change in
// RISCVInsertReadWriteCSR
FRM_DYN,
@@ -1823,11 +1823,11 @@ multiclass VPatFPMulAddVL_VV_VF_RM<SDPatternOperator vop, string instruction_nam
def : Pat<(vti.Vector (vop (SplatFPOp vti.ScalarRegClass:$rs1),
vti.RegClass:$rd, vti.RegClass:$rs2,
- (vti.Mask V0),
+ (vti.Mask VMV0:$vm),
VLOpFrag)),
(!cast<Instruction>(instruction_name#"_V" # vti.ScalarSuffix # "_" # suffix # "_MASK")
vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
- (vti.Mask V0),
+ (vti.Mask VMV0:$vm),
// Value to indicate no rounding mode change in
// RISCVInsertReadWriteCSR
FRM_DYN,
@@ -1840,34 +1840,34 @@ multiclass VPatFPMulAccVL_VV_VF<PatFrag vop, string instruction_name> {
foreach vti = AllFloatVectors in {
defvar suffix = vti.LMul.MX;
let Predicates = GetVTypePredicates<vti>.Predicates in {
- def : Pat<(riscv_vp_merge_vl (vti.Mask V0),
+ def : Pat<(riscv_vp_merge_vl (vti.Mask VMV0:$vm),
(vti.Vector (vop vti.RegClass:$rs1, vti.RegClass:$rs2,
vti.RegClass:$rd, (vti.Mask true_mask), VLOpFrag)),
vti.RegClass:$rd, VLOpFrag),
(!cast<Instruction>(instruction_name#"_VV_"# suffix #"_MASK")
vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
- (vti.Mask V0), GPR:$vl, vti.Log2SEW, TU_MU)>;
- def : Pat<(riscv_vp_merge_vl (vti.Mask V0),
+ (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW, TU_MU)>;
+ def : Pat<(riscv_vp_merge_vl (vti.Mask VMV0:$vm),
(vti.Vector (vop (SplatFPOp vti.ScalarRegClass:$rs1), vti.RegClass:$rs2,
vti.RegClass:$rd, (vti.Mask true_mask), VLOpFrag)),
vti.RegClass:$rd, VLOpFrag),
(!cast<Instruction>(instruction_name#"_V" # vti.ScalarSuffix # "_" # suffix # "_MASK")
vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
- (vti.Mask V0), GPR:$vl, vti.Log2SEW, TU_MU)>;
- def : Pat<(riscv_vselect_vl (vti.Mask V0),
+ (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW, TU_MU)>;
+ def : Pat<(riscv_vselect_vl (vti.Mask VMV0:$vm),
(vti.Vector (vop vti.RegClass:$rs1, vti.RegClass:$rs2,
vti.RegClass:$rd, (vti.Mask true_mask), VLOpFrag)),
vti.RegClass:$rd, VLOpFrag),
(!cast<Instruction>(instruction_name#"_VV_"# suffix #"_MASK")
vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
- (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
- def : Pat<(riscv_vselect_vl (vti.Mask V0),
+ (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
+ def : Pat<(riscv_vselect_vl (vti.Mask VMV0:$vm),
(vti.Vector (vop (SplatFPOp vti.ScalarRegClass:$rs1), vti.RegClass:$rs2,
vti.RegClass:$rd, (vti.Mask true_mask), VLOpFrag)),
vti.RegClass:$rd, VLOpFrag),
(!cast<Instruction>(instruction_name#"_V" # vti.ScalarSuffix # "_" # suffix # "_MASK")
vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
- (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
+ (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
}
}
}
@@ -1876,46 +1876,46 @@ multiclass VPatFPMulAccVL_VV_VF_RM<PatFrag vop, string instruction_name> {
foreach vti = AllFloatVectors in {
defvar suffix = vti.LMul.MX;
let Predicates = GetVTypePredicates<vti>.Predicates in {
- def : Pat<(riscv_vp_merge_vl (vti.Mask V0),
+ def : Pat<(riscv_vp_merge_vl (vti.Mask VMV0:$vm),
(vti.Vector (vop vti.RegClass:$rs1, vti.RegClass:$rs2,
vti.RegClass:$rd, (vti.Mask true_mask), VLOpFrag)),
vti.RegClass:$rd, VLOpFrag),
(!cast<Instruction>(instruction_name#"_VV_"# suffix #"_MASK")
vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
- (vti.Mask V0),
+ (vti.Mask VMV0:$vm),
// Value to indicate no rounding mode change in
// RISCVInsertReadWriteCSR
FRM_DYN,
GPR:$vl, vti.Log2SEW, TU_MU)>;
- def : Pat<(riscv_vp_merge_vl (vti.Mask V0),
+ def : Pat<(riscv_vp_merge_vl (vti.Mask VMV0:$vm),
(vti.Vector (vop (SplatFPOp vti.ScalarRegClass:$rs1), vti.RegClass:$rs2,
vti.RegClass:$rd, (vti.Mask true_mask), VLOpFrag)),
vti.RegClass:$rd, VLOpFrag),
(!cast<Instruction>(instruction_name#"_V" # vti.ScalarSuffix # "_" # suffix # "_MASK")
vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
- (vti.Mask V0),
+ (vti.Mask VMV0:$vm),
// Value to indicate no rounding mode change in
// RISCVInsertReadWriteCSR
FRM_DYN,
GPR:$vl, vti.Log2SEW, TU_MU)>;
- def : Pat<(riscv_vselect_vl (vti.Mask V0),
+ def : Pat<(riscv_vselect_vl (vti.Mask VMV0:$vm),
(vti.Vector (vop vti.RegClass:$rs1, vti.RegClass:$rs2,
vti.RegClass:$rd, (vti.Mask true_mask), VLOpFrag)),
vti.RegClass:$rd, VLOpFrag),
(!cast<Instruction>(instruction_name#"_VV_"# suffix #"_MASK")
vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
- (vti.Mask V0),
+ (vti.Mask VMV0:$vm),
// Value to indicate no rounding mode change in
// RISCVInsertReadWriteCSR
FRM_DYN,
GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
- def : Pat<(riscv_vselect_vl (vti.Mask V0),
+ def : Pat<(riscv_vselect_vl (vti.Mask VMV0:$vm),
(vti.Vector (vop (SplatFPOp vti.ScalarRegClass:$rs1), vti.RegClass:$rs2,
vti.RegClass:$rd, (vti.Mask true_mask), VLOpFrag)),
vti.RegClass:$rd, VLOpFrag),
(!cast<Instruction>(instruction_name#"_V" # vti.ScalarSuffix # "_" # suffix # "_MASK")
vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
- (vti.Mask V0),
+ (vti.Mask VMV0:$vm),
// Value to indicate no rounding mode change in
// RISCVInsertReadWriteCSR
FRM_DYN,
@@ -1932,18 +1932,18 @@ multiclass VPatWidenFPMulAccVL_VV_VF<SDNode vop, string instruction_name> {
GetVTypePredicates<wti>.Predicates) in {
def : Pat<(vop (vti.Vector vti.RegClass:$rs1),
(vti.Vector vti.RegClass:$rs2),
- (wti.Vector wti.RegClass:$rd), (vti.Mask V0),
+ (wti.Vector wti.RegClass:$rd), (vti.Mask VMV0:$vm),
VLOpFrag),
(!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX #"_MASK")
wti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
- (vti.Mask V0), GPR:$vl, vti.Log2SEW, TA_MA)>;
+ (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW, TA_MA)>;
def : Pat<(vop (vti.Vector (SplatFPOp vti.ScalarRegClass:$rs1)),
(vti.Vector vti.RegClass:$rs2),
- (wti.Vector wti.RegClass:$rd), (vti.Mask V0),
+ (wti.Vector wti.RegClass:$rd), (vti.Mask VMV0:$vm),
VLOpFrag),
(!cast<Instruction>(instruction_name#"_V"#vti.ScalarSuffix#"_"#vti.LMul.MX #"_MASK")
wti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
- (vti.Mask V0), GPR:$vl, vti.Log2SEW, TA_MA)>;
+ (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW, TA_MA)>;
}
}
}
@@ -1956,22 +1956,22 @@ multiclass VPatWidenFPMulAccVL_VV_VF_RM<SDNode vop, string instruction_name> {
GetVTypePredicates<wti>.Predicates) in {
def : Pat<(vop (vti.Vector vti.RegClass:$rs1),
(vti.Vector vti.RegClass:$rs2),
- (wti.Vector wti.RegClass:$rd), (vti.Mask V0),
+ (wti.Vector wti.RegClass:$rd), (vti.Mask VMV0:$vm),
VLOpFrag),
(!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX #"_MASK")
wti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
- (vti.Mask V0),
+ (vti.Mask VMV0:$vm),
// Value to indicate no rounding mode change in
// RISCVInsertReadWriteCSR
FRM_DYN,
GPR:$vl, vti.Log2SEW, TA_MA)>;
def : Pat<(vop (vti.Vector (SplatFPOp vti.ScalarRegClass:$rs1)),
(vti.Vector vti.RegClass:$rs2),
- (wti.Vector wti.RegClass:$rd), (vti.Mask V0),
+ (wti.Vector wti.RegClass:$rd), (vti.Mask VMV0:$vm),
VLOpFrag),
(!cast<Instruction>(instruction_name#"_V"#vti.ScalarSuffix#"_"#vti.LMul.MX #"_MASK")
wti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
- (vti.Mask V0),
+ (vti.Mask VMV0:$vm),
// Value to indicate no rounding mode change in
// RISCVInsertReadWriteCSR
FRM_DYN,
@@ -1985,20 +1985,20 @@ multiclass VPatSlideVL_VX_VI<SDNode vop, string instruction_name> {
let Predicates = GetVTypePredicates<vti>.Predicates in {
def : Pat<(vti.Vector (vop (vti.Vector vti.RegClass:$rd),
(vti.Vector vti.RegClass:$rs1),
- uimm5:$rs2, (vti.Mask V0),
+ uimm5:$rs2, (vti.Mask VMV0:$vm),
VLOpFrag, (XLenVT timm:$policy))),
(!cast<Instruction>(instruction_name#"_VI_"#vti.LMul.MX#"_MASK")
vti.RegClass:$rd, vti.RegClass:$rs1, uimm5:$rs2,
- (vti.Mask V0), GPR:$vl, vti.Log2SEW,
+ (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW,
(XLenVT timm:$policy))>;
def : Pat<(vti.Vector (vop (vti.Vector vti.RegClass:$rd),
(vti.Vector vti.RegClass:$rs1),
- GPR:$rs2, (vti.Mask V0),
+ GPR:$rs2, (vti.Mask VMV0:$vm),
VLOpFrag, (XLenVT timm:$policy))),
(!cast<Instruction>(instruction_name#"_VX_"#vti.LMul.MX#"_MASK")
vti.RegClass:$rd, vti.RegClass:$rs1, GPR:$rs2,
- (vti.Mask V0), GPR:$vl, vti.Log2SEW,
+ (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW,
(XLenVT timm:$policy))>;
}
}
@@ -2009,10 +2009,10 @@ multiclass VPatSlide1VL_VX<SDNode vop, string instruction_name> {
let Predicates = GetVTypePredicates<vti>.Predicates in {
def : Pat<(vti.Vector (vop (vti.Vector vti.RegClass:$rs3),
(vti.Vector vti.RegClass:$rs1),
- GPR:$rs2, (vti.Mask V0), VLOpFrag)),
+ GPR:$rs2, (vti.Mask VMV0:$vm), VLOpFrag)),
(!cast<Instruction>(instruction_name#"_VX_"#vti.LMul.MX#"_MASK")
vti.RegClass:$rs3, vti.RegClass:$rs1, GPR:$rs2,
- (vti.Mask V0), GPR:$vl, vti.Log2SEW, TU_MU)>;
+ (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW, TU_MU)>;
}
}
}
@@ -2022,10 +2022,10 @@ multiclass VPatSlide1VL_VF<SDNode vop, string instruction_name> {
let Predicates = GetVTypePredicates<vti>.Predicates in {
def : Pat<(vti.Vector (vop (vti.Vector vti.RegClass:$rs3),
(vti.Vector vti.RegClass:$rs1),
- vti.Scalar:$rs2, (vti.Mask V0), VLOpFrag)),
+ vti.Scalar:$rs2, (vti.Mask VMV0:$vm), VLOpFrag)),
(!cast<Instruction>(instruction_name#"_V"#vti.ScalarSuffix#"_"#vti.LMul.MX#"_MASK")
vti.RegClass:$rs3, vti.RegClass:$rs1, vti.Scalar:$rs2,
- (vti.Mask V0), GPR:$vl, vti.Log2SEW, TU_MU)>;
+ (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW, TU_MU)>;
}
}
}
@@ -2045,16 +2045,16 @@ foreach vti = AllIntegerVectors in {
let Predicates = GetVTypePredicates<vti>.Predicates in {
def : Pat<(riscv_sub_vl (vti.Vector (SplatPat (XLenVT GPR:$rs2))),
(vti.Vector vti.RegClass:$rs1),
- vti.RegClass:$merge, (vti.Mask V0), VLOpFrag),
+ vti.RegClass:$merge, (vti.Mask VMV0:$vm), VLOpFrag),
(!cast<Instruction>("PseudoVRSUB_VX_"# vti.LMul.MX#"_MASK")
vti.RegClass:$merge, vti.RegClass:$rs1, GPR:$rs2,
- (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
+ (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
def : Pat<(riscv_sub_vl (vti.Vector (SplatPat_simm5 simm5:$rs2)),
(vti.Vector vti.RegClass:$rs1),
- vti.RegClass:$merge, (vti.Mask V0), VLOpFrag),
+ vti.RegClass:$merge, (vti.Mask VMV0:$vm), VLOpFrag),
(!cast<Instruction>("PseudoVRSUB_VI_"# vti.LMul.MX#"_MASK")
vti.RegClass:$merge, vti.RegClass:$rs1, simm5:$rs2,
- (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
+ (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
}
}
@@ -2072,22 +2072,22 @@ foreach vtiToWti = AllWidenableIntVectors in {
GetVTypePredicates<wti>.Predicates) in {
def : Pat<(riscv_shl_vl (wti.Vector (riscv_sext_vl_oneuse
(vti.Vector vti.RegClass:$rs1),
- (vti.Mask V0), VLOpFrag)),
+ (vti.Mask VMV0:$vm), VLOpFrag)),
(wti.Vector (riscv_vmv_v_x_vl
(wti.Vector undef), 1, VLOpFrag)),
- wti.RegClass:$merge, (vti.Mask V0), VLOpFrag),
+ wti.RegClass:$merge, (vti.Mask VMV0:$vm), VLOpFrag),
(!cast<Instruction>("PseudoVWADD_VV_"#vti.LMul.MX#"_MASK")
wti.RegClass:$merge, vti.RegClass:$rs1, vti.RegClass:$rs1,
- (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
+ (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
def : Pat<(riscv_shl_vl (wti.Vector (riscv_zext_vl_oneuse
(vti.Vector vti.RegClass:$rs1),
- (vti.Mask V0), VLOpFrag)),
+ (vti.Mask VMV0:$vm), VLOpFrag)),
(wti.Vector (riscv_vmv_v_x_vl
(wti.Vector undef), 1, VLOpFrag)),
- wti.RegClass:$merge, (vti.Mask V0), VLOpFrag),
+ wti.RegClass:$merge, (vti.Mask VMV0:$vm), VLOpFrag),
(!cast<Instruction>("PseudoVWADDU_VV_"#vti.LMul.MX#"_MASK")
wti.RegClass:$merge, vti.RegClass:$rs1, vti.RegClass:$rs1,
- (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
+ (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
}
}
@@ -2148,11 +2148,11 @@ foreach vtiTowti = AllWidenableIntVectors in {
let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
GetVTypePredicates<wti>.Predicates) in
def : Pat<(vti.Vector (riscv_trunc_vector_vl (wti.Vector wti.RegClass:$rs1),
- (vti.Mask V0),
+ (vti.Mask VMV0:$vm),
VLOpFrag)),
(!cast<Instruction>("PseudoVNSRL_WI_"#vti.LMul.MX#"_MASK")
(vti.Vector (IMPLICIT_DEF)), wti.RegClass:$rs1, 0,
- (vti.Mask V0), GPR:$vl, vti.Log2SEW, TA_MA)>;
+ (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW, TA_MA)>;
}
// 11.8. Vector Integer Comparison Instructions
@@ -2239,63 +2239,63 @@ foreach vtiTowti = AllWidenableIntVectors in {
def : Pat<(riscv_vwmaccsu_vl (vti.Vector vti.RegClass:$rs1),
(SplatPat XLenVT:$rs2),
(wti.Vector wti.RegClass:$rd),
- (vti.Mask V0), VLOpFrag),
+ (vti.Mask VMV0:$vm), VLOpFrag),
(!cast<Instruction>("PseudoVWMACCUS_VX_"#vti.LMul.MX#"_MASK")
wti.RegClass:$rd, vti.ScalarRegClass:$rs2, vti.RegClass:$rs1,
- (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
+ (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
}
// 11.15. Vector Integer Merge Instructions
foreach vti = AllIntegerVectors in {
let Predicates = GetVTypePredicates<vti>.Predicates in {
- def : Pat<(vti.Vector (riscv_vselect_vl (vti.Mask V0),
+ def : Pat<(vti.Vector (riscv_vselect_vl (vti.Mask VMV0:$vm),
vti.RegClass:$rs1,
vti.RegClass:$rs2,
VLOpFrag)),
(!cast<Instruction>("PseudoVMERGE_VVM_"#vti.LMul.MX)
(vti.Vector (IMPLICIT_DEF)),
- vti.RegClass:$rs2, vti.RegClass:$rs1, (vti.Mask V0),
+ vti.RegClass:$rs2, vti.RegClass:$rs1, (vti.Mask VMV0:$vm),
GPR:$vl, vti.Log2SEW)>;
- def : Pat<(vti.Vector (riscv_vselect_vl (vti.Mask V0),
+ def : Pat<(vti.Vector (riscv_vselect_vl (vti.Mask VMV0:$vm),
(SplatPat XLenVT:$rs1),
vti.RegClass:$rs2,
VLOpFrag)),
(!cast<Instruction>("PseudoVMERGE_VXM_"#vti.LMul.MX)
(vti.Vector (IMPLICIT_DEF)),
- vti.RegClass:$rs2, GPR:$rs1, (vti.Mask V0), GPR:$vl, vti.Log2SEW)>;
+ vti.RegClass:$rs2, GPR:$rs1, (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW)>;
- def : Pat<(vti.Vector (riscv_vselect_vl (vti.Mask V0),
+ def : Pat<(vti.Vector (riscv_vselect_vl (vti.Mask VMV0:$vm),
(SplatPat_simm5 simm5:$rs1),
vti.RegClass:$rs2,
VLOpFrag)),
(!cast<Instruction>("PseudoVMERGE_VIM_"#vti.LMul.MX)
(vti.Vector (IMPLICIT_DEF)),
- vti.RegClass:$rs2, simm5:$rs1, (vti.Mask V0), GPR:$vl, vti.Log2SEW)>;
+ vti.RegClass:$rs2, simm5:$rs1, (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW)>;
- def : Pat<(vti.Vector (riscv_vp_merge_vl (vti.Mask V0),
+ def : Pat<(vti.Vector (riscv_vp_merge_vl (vti.Mask VMV0:$vm),
vti.RegClass:$rs1,
vti.RegClass:$rs2,
VLOpFrag)),
(!cast<Instruction>("PseudoVMERGE_VVM_"#vti.LMul.MX)
vti.RegClass:$rs2, vti.RegClass:$rs2, vti.RegClass:$rs1,
- (vti.Mask V0), GPR:$vl, vti.Log2SEW)>;
+ (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW)>;
- def : Pat<(vti.Vector (riscv_vp_merge_vl (vti.Mask V0),
+ def : Pat<(vti.Vector (riscv_vp_merge_vl (vti.Mask VMV0:$vm),
(SplatPat XLenVT:$rs1),
vti.RegClass:$rs2,
VLOpFrag)),
(!cast<Instruction>("PseudoVMERGE_VXM_"#vti.LMul.MX)
vti.RegClass:$rs2, vti.RegClass:$rs2, GPR:$rs1,
- (vti.Mask V0), GPR:$vl, vti.Log2SEW)>;
+ (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW)>;
- def : Pat<(vti.Vector (riscv_vp_merge_vl (vti.Mask V0),
+ def : Pat<(vti.Vector (riscv_vp_merge_vl (vti.Mask VMV0:$vm),
(SplatPat_simm5 simm5:$rs1),
vti.RegClass:$rs2,
VLOpFrag)),
(!cast<Instruction>("PseudoVMERGE_VIM_"#vti.LMul.MX)
vti.RegClass:$rs2, vti.RegClass:$rs2, simm5:$rs1,
- (vti.Mask V0), GPR:$vl, vti.Log2SEW)>;
+ (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW)>;
}
}
@@ -2388,39 +2388,39 @@ defm : VPatFPSetCCVL_VV_VF_FV<any_riscv_fsetccs_vl, SETOLE,
foreach vti = AllFloatVectors in {
let Predicates = GetVTypePredicates<vti>.Predicates in {
// 13.8. Vector Floating-Point Square-Root Instruction
- def : Pat<(any_riscv_fsqrt_vl (vti.Vector vti.RegClass:$rs2), (vti.Mask V0),
+ def : Pat<(any_riscv_fsqrt_vl (vti.Vector vti.RegClass:$rs2), (vti.Mask VMV0:$vm),
VLOpFrag),
(!cast<Instruction>("PseudoVFSQRT_V_"# vti.LMul.MX # "_E" # vti.SEW # "_MASK")
(vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs2,
- (vti.Mask V0),
+ (vti.Mask VMV0:$vm),
// Value to indicate no rounding mode change in
// RISCVInsertReadWriteCSR
FRM_DYN,
GPR:$vl, vti.Log2SEW, TA_MA)>;
// 13.12. Vector Floating-Point Sign-Injection Instructions
- def : Pat<(riscv_fabs_vl (vti.Vector vti.RegClass:$rs), (vti.Mask V0),
+ def : Pat<(riscv_fabs_vl (vti.Vector vti.RegClass:$rs), (vti.Mask VMV0:$vm),
VLOpFrag),
(!cast<Instruction>("PseudoVFSGNJX_VV_"# vti.LMul.MX #"_MASK")
(vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs,
- vti.RegClass:$rs, (vti.Mask V0), GPR:$vl, vti.Log2SEW,
+ vti.RegClass:$rs, (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW,
TA_MA)>;
// Handle fneg with VFSGNJN using the same input for both operands.
- def : Pat<(riscv_fneg_vl (vti.Vector vti.RegClass:$rs), (vti.Mask V0),
+ def : Pat<(riscv_fneg_vl (vti.Vector vti.RegClass:$rs), (vti.Mask VMV0:$vm),
VLOpFrag),
(!cast<Instruction>("PseudoVFSGNJN_VV_"# vti.LMul.MX #"_MASK")
(vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs,
- vti.RegClass:$rs, (vti.Mask V0), GPR:$vl, vti.Log2SEW,
+ vti.RegClass:$rs, (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW,
TA_MA)>;
def : Pat<(riscv_fcopysign_vl (vti.Vector vti.RegClass:$rs1),
(vti.Vector vti.RegClass:$rs2),
vti.RegClass:$merge,
- (vti.Mask V0),
+ (vti.Mask VMV0:$vm),
VLOpFrag),
(!cast<Instruction>("PseudoVFSGNJ_VV_"# vti.LMul.MX#"_MASK")
vti.RegClass:$merge, vti.RegClass:$rs1,
- vti.RegClass:$rs2, (vti.Mask V0), GPR:$vl, vti.Log2SEW,
+ vti.RegClass:$rs2, (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW,
TAIL_AGNOSTIC)>;
def : Pat<(riscv_fcopysign_vl (vti.Vector vti.RegClass:$rs1),
@@ -2437,26 +2437,26 @@ foreach vti = AllFloatVectors in {
def : Pat<(riscv_fcopysign_vl (vti.Vector vti.RegClass:$rs1),
(SplatFPOp vti.ScalarRegClass:$rs2),
vti.RegClass:$merge,
- (vti.Mask V0),
+ (vti.Mask VMV0:$vm),
VLOpFrag),
(!cast<Instruction>("PseudoVFSGNJ_V"#vti.ScalarSuffix#"_"# vti.LMul.MX#"_MASK")
vti.RegClass:$merge, vti.RegClass:$rs1,
- vti.ScalarRegClass:$rs2, (vti.Mask V0), GPR:$vl, vti.Log2SEW,
+ vti.ScalarRegClass:$rs2, (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW,
TAIL_AGNOSTIC)>;
// Rounding without exception to implement nearbyint.
def : Pat<(any_riscv_vfround_noexcept_vl (vti.Vector vti.RegClass:$rs1),
- (vti.Mask V0), VLOpFrag),
+ (vti.Mask VMV0:$vm), VLOpFrag),
(!cast<Instruction>("PseudoVFROUND_NOEXCEPT_V_" # vti.LMul.MX #"_MASK")
(vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1,
- (vti.Mask V0), GPR:$vl, vti.Log2SEW, TA_MA)>;
+ (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW, TA_MA)>;
// 14.14. Vector Floating-Point Classify Instruction
def : Pat<(riscv_fclass_vl (vti.Vector vti.RegClass:$rs2),
- (vti.Mask V0), VLOpFrag),
+ (vti.Mask VMV0:$vm), VLOpFrag),
(!cast<Instruction>("PseudoVFCLASS_V_"# vti.LMul.MX #"_MASK")
(vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs2,
- (vti.Mask V0), GPR:$vl, vti.Log2SEW, TA_MA)>;
+ (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW, TA_MA)>;
}
}
@@ -2466,16 +2466,16 @@ foreach fvti = AllFloatVectors in {
// 13.15. Vector Floating-Point Merge Instruction
defvar ivti = GetIntVTypeInfo<fvti>.Vti;
let Predicates = GetVTypePredicates<ivti>.Predicates in {
- def : Pat<(fvti.Vector (riscv_vselect_vl (fvti.Mask V0),
+ def : Pat<(fvti.Vector (riscv_vselect_vl (fvti.Mask VMV0:$vm),
fvti.RegClass:$rs1,
fvti.RegClass:$rs2,
VLOpFrag)),
(!cast<Instruction>("PseudoVMERGE_VVM_"#fvti.LMul.MX)
(fvti.Vector (IMPLICIT_DEF)),
- fvti.RegClass:$rs2, fvti.RegClass:$rs1, (fvti.Mask V0),
+ fvti.RegClass:$rs2, fvti.RegClass:$rs1, (fvti.Mask VMV0:$vm),
GPR:$vl, fvti.Log2SEW)>;
- def : Pat<(fvti.Vector (riscv_vselect_vl (fvti.Mask V0),
+ def : Pat<(fvti.Vector (riscv_vselect_vl (fvti.Mask VMV0:$vm),
(SplatFPOp (SelectFPImm (XLenVT GPR:$imm))),
fvti.RegClass:$rs2,
VLOpFrag)),
@@ -2483,35 +2483,35 @@ foreach fvti = AllFloatVectors in {
(fvti.Vector (IMPLICIT_DEF)),
fvti.RegClass:$rs2,
GPR:$imm,
- (fvti.Mask V0), GPR:$vl, fvti.Log2SEW)>;
+ (fvti.Mask VMV0:$vm), GPR:$vl, fvti.Log2SEW)>;
- def : Pat<(fvti.Vector (riscv_vselect_vl (fvti.Mask V0),
+ def : Pat<(fvti.Vector (riscv_vselect_vl (fvti.Mask VMV0:$vm),
(SplatFPOp (fvti.Scalar fpimm0)),
fvti.RegClass:$rs2,
VLOpFrag)),
(!cast<Instruction>("PseudoVMERGE_VIM_"#fvti.LMul.MX)
(fvti.Vector (IMPLICIT_DEF)),
- fvti.RegClass:$rs2, 0, (fvti.Mask V0), GPR:$vl, fvti.Log2SEW)>;
+ fvti.RegClass:$rs2, 0, (fvti.Mask VMV0:$vm), GPR:$vl, fvti.Log2SEW)>;
- def : Pat<(fvti.Vector (riscv_vp_merge_vl (fvti.Mask V0),
+ def : Pat<(fvti.Vector (riscv_vp_merge_vl (fvti.Mask VMV0:$vm),
fvti.RegClass:$rs1,
fvti.RegClass:$rs2,
VLOpFrag)),
(!cast<Instruction>("PseudoVMERGE_VVM_"#fvti.LMul.MX)
- fvti.RegClass:$rs2, fvti.RegClass:$rs2, fvti.RegClass:$rs1, (fvti.Mask V0),
+ fvti.RegClass:$rs2, fvti.RegClass:$rs2, fvti.RegClass:$rs1, (fvti.Mask VMV0:$vm),
GPR:$vl, fvti.Log2SEW)>;
- def : Pat<(fvti.Vector (riscv_vp_merge_vl (fvti.Mask V0),
+ def : Pat<(fvti.Vector (riscv_vp_merge_vl (fvti.Mask VMV0:$vm),
(SplatFPOp (fvti.Scalar fpimm0)),
fvti.RegClass:$rs2,
VLOpFrag)),
(!cast<Instruction>("PseudoVMERGE_VIM_"#fvti.LMul.MX)
- fvti.RegClass:$rs2, fvti.RegClass:$rs2, 0, (fvti.Mask V0),
+ fvti.RegClass:$rs2, fvti.RegClass:$rs2, 0, (fvti.Mask VMV0:$vm),
GPR:$vl, fvti.Log2SEW)>;
}
let Predicates = GetVTypePredicates<fvti>.Predicates in {
- def : Pat<(fvti.Vector (riscv_vselect_vl (fvti.Mask V0),
+ def : Pat<(fvti.Vector (riscv_vselect_vl (fvti.Mask VMV0:$vm),
(SplatFPOp fvti.ScalarRegClass:$rs1),
fvti.RegClass:$rs2,
VLOpFrag)),
@@ -2519,16 +2519,16 @@ foreach fvti = AllFloatVectors in {
(fvti.Vector (IMPLICIT_DEF)),
fvti.RegClass:$rs2,
(fvti.Scalar fvti.ScalarRegClass:$rs1),
- (fvti.Mask V0), GPR:$vl, fvti.Log2SEW)>;
+ (fvti.Mask VMV0:$vm), GPR:$vl, fvti.Log2SEW)>;
- def : Pat<(fvti.Vector (riscv_vp_merge_vl (fvti.Mask V0),
+ def : Pat<(fvti.Vector (riscv_vp_merge_vl (fvti.Mask VMV0:$vm),
(SplatFPOp fvti.ScalarRegClass:$rs1),
fvti.RegClass:$rs2,
VLOpFrag)),
(!cast<Instruction>("PseudoVFMERGE_V"#fvti.ScalarSuffix#"M_"#fvti.LMul.MX)
fvti.RegClass:$rs2, fvti.RegClass:$rs2,
(fvti.Scalar fvti.ScalarRegClass:$rs1),
- (fvti.Mask V0), GPR:$vl, fvti.Log2SEW)>;
+ (fvti.Mask VMV0:$vm), GPR:$vl, fvti.Log2SEW)>;
// 13.16. Vector Floating-Point Move Instruction
// If we're splatting fpimm0, use vmv.v.x vd, x0.
@@ -2585,11 +2585,11 @@ foreach fvtiToFWti = AllWidenableFloatVectors in {
GetVTypePredicates<fwti>.Predicates)) in
def : Pat<(fwti.Vector (any_riscv_fpextend_vl
(fvti.Vector fvti.RegClass:$rs1),
- (fvti.Mask V0),
+ (fvti.Mask VMV0:$vm),
VLOpFrag)),
(!cast<Instruction>("PseudoVFWCVT_F_F_V_"#fvti.LMul.MX#"_MASK")
(fwti.Vector (IMPLICIT_DEF)), fvti.RegClass:$rs1,
- (fvti.Mask V0),
+ (fvti.Mask VMV0:$vm),
GPR:$vl, fvti.Log2SEW, TA_MA)>;
}
@@ -2617,10 +2617,10 @@ foreach fvtiToFWti = AllWidenableFloatVectors in {
GetVTypePredicates<fwti>.Predicates)) in {
def : Pat<(fvti.Vector (any_riscv_fpround_vl
(fwti.Vector fwti.RegClass:$rs1),
- (fwti.Mask V0), VLOpFrag)),
+ (fwti.Mask VMV0:$vm), VLOpFrag)),
(!cast<Instruction>("PseudoVFNCVT_F_F_W_"#fvti.LMul.MX#"_MASK")
(fvti.Vector (IMPLICIT_DEF)), fwti.RegClass:$rs1,
- (fwti.Mask V0),
+ (fwti.Mask VMV0:$vm),
// Value to indicate no rounding mode change in
// RISCVInsertReadWriteCSR
FRM_DYN,
@@ -2630,10 +2630,10 @@ foreach fvtiToFWti = AllWidenableFloatVectors in {
GetVTypePredicates<fwti>.Predicates) in
def : Pat<(fvti.Vector (any_riscv_fncvt_rod_vl
(fwti.Vector fwti.RegClass:$rs1),
- (fwti.Mask V0), VLOpFrag)),
+ (fwti.Mask VMV0:$vm), VLOpFrag)),
(!cast<Instruction>("PseudoVFNCVT_ROD_F_F_W_"#fvti.LMul.MX#"_MASK")
(fvti.Vector (IMPLICIT_DEF)), fwti.RegClass:$rs1,
- (fwti.Mask V0), GPR:$vl, fvti.Log2SEW, TA_MA)>;
+ (fwti.Mask VMV0:$vm), GPR:$vl, fvti.Log2SEW, TA_MA)>;
}
}
@@ -2737,20 +2737,20 @@ foreach mti = AllMasks in {
VLOpFrag)),
(!cast<Instruction>("PseudoVCPOP_M_" # mti.BX)
VR:$rs2, GPR:$vl, mti.Log2SEW)>;
- def : Pat<(XLenVT (riscv_vcpop_vl (mti.Mask VR:$rs2), (mti.Mask V0),
+ def : Pat<(XLenVT (riscv_vcpop_vl (mti.Mask VR:$rs2), (mti.Mask VMV0:$vm),
VLOpFrag)),
(!cast<Instruction>("PseudoVCPOP_M_" # mti.BX # "_MASK")
- VR:$rs2, (mti.Mask V0), GPR:$vl, mti.Log2SEW)>;
+ VR:$rs2, (mti.Mask VMV0:$vm), GPR:$vl, mti.Log2SEW)>;
// 15.3 vfirst find-first-set mask bit
def : Pat<(XLenVT (riscv_vfirst_vl (mti.Mask VR:$rs2), (mti.Mask true_mask),
VLOpFrag)),
(!cast<Instruction>("PseudoVFIRST_M_" # mti.BX)
VR:$rs2, GPR:$vl, mti.Log2SEW)>;
- def : Pat<(XLenVT (riscv_vfirst_vl (mti.Mask VR:$rs2), (mti.Mask V0),
+ def : Pat<(XLenVT (riscv_vfirst_vl (mti.Mask VR:$rs2), (mti.Mask VMV0:$vm),
VLOpFrag)),
(!cast<Instruction>("PseudoVFIRST_M_" # mti.BX # "_MASK")
- VR:$rs2, (mti.Mask V0), GPR:$vl, mti.Log2SEW)>;
+ VR:$rs2, (mti.Mask VMV0:$vm), GPR:$vl, mti.Log2SEW)>;
}
}
@@ -2770,26 +2770,26 @@ foreach vti = AllIntegerVectors in {
def : Pat<(vti.Vector (riscv_vrgather_vv_vl vti.RegClass:$rs2,
vti.RegClass:$rs1,
vti.RegClass:$merge,
- (vti.Mask V0),
+ (vti.Mask VMV0:$vm),
VLOpFrag)),
(!cast<Instruction>("PseudoVRGATHER_VV_"# vti.LMul.MX#"_E"# vti.SEW#"_MASK")
vti.RegClass:$merge, vti.RegClass:$rs2, vti.RegClass:$rs1,
- (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
+ (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
def : Pat<(vti.Vector (riscv_vrgather_vx_vl vti.RegClass:$rs2, GPR:$rs1,
vti.RegClass:$merge,
- (vti.Mask V0),
+ (vti.Mask VMV0:$vm),
VLOpFrag)),
(!cast<Instruction>("PseudoVRGATHER_VX_"# vti.LMul.MX#"_MASK")
vti.RegClass:$merge, vti.RegClass:$rs2, GPR:$rs1,
- (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
+ (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
def : Pat<(vti.Vector (riscv_vrgather_vx_vl vti.RegClass:$rs2,
uimm5:$imm,
vti.RegClass:$merge,
- (vti.Mask V0),
+ (vti.Mask VMV0:$vm),
VLOpFrag)),
(!cast<Instruction>("PseudoVRGATHER_VI_"# vti.LMul.MX#"_MASK")
vti.RegClass:$merge, vti.RegClass:$rs2, uimm5:$imm,
- (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
+ (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
}
// emul = lmul * 16 / sew
@@ -2805,11 +2805,11 @@ foreach vti = AllIntegerVectors in {
(riscv_vrgatherei16_vv_vl vti.RegClass:$rs2,
(ivti.Vector ivti.RegClass:$rs1),
vti.RegClass:$merge,
- (vti.Mask V0),
+ (vti.Mask VMV0:$vm),
VLOpFrag)),
(!cast<Instruction>(inst#"_MASK")
vti.RegClass:$merge, vti.RegClass:$rs2, ivti.RegClass:$rs1,
- (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
+ (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
}
}
@@ -2840,27 +2840,27 @@ foreach vti = AllFloatVectors in {
(riscv_vrgather_vv_vl vti.RegClass:$rs2,
(ivti.Vector vti.RegClass:$rs1),
vti.RegClass:$merge,
- (vti.Mask V0),
+ (vti.Mask VMV0:$vm),
VLOpFrag)),
(!cast<Instruction>("PseudoVRGATHER_VV_"# vti.LMul.MX#"_E"# vti.SEW#"_MASK")
vti.RegClass:$merge, vti.RegClass:$rs2, vti.RegClass:$rs1,
- (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
+ (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
def : Pat<(vti.Vector (riscv_vrgather_vx_vl vti.RegClass:$rs2, GPR:$rs1,
vti.RegClass:$merge,
- (vti.Mask V0),
+ (vti.Mask VMV0:$vm),
VLOpFrag)),
(!cast<Instruction>("PseudoVRGATHER_VX_"# vti.LMul.MX#"_MASK")
vti.RegClass:$merge, vti.RegClass:$rs2, GPR:$rs1,
- (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
+ (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
def : Pat<(vti.Vector
(riscv_vrgather_vx_vl vti.RegClass:$rs2,
uimm5:$imm,
vti.RegClass:$merge,
- (vti.Mask V0),
+ (vti.Mask VMV0:$vm),
VLOpFrag)),
(!cast<Instruction>("PseudoVRGATHER_VI_"# vti.LMul.MX#"_MASK")
vti.RegClass:$merge, vti.RegClass:$rs2, uimm5:$imm,
- (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
+ (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
}
defvar vlmul = vti.LMul;
@@ -2876,11 +2876,11 @@ foreach vti = AllFloatVectors in {
(riscv_vrgatherei16_vv_vl vti.RegClass:$rs2,
(ivti.Vector ivti.RegClass:$rs1),
vti.RegClass:$merge,
- (vti.Mask V0),
+ (vti.Mask VMV0:$vm),
VLOpFrag)),
(!cast<Instruction>(inst#"_MASK")
vti.RegClass:$merge, vti.RegClass:$rs2, ivti.RegClass:$rs1,
- (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
+ (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
}
}
@@ -2917,10 +2917,10 @@ def riscv_fslide1down_vl : SDNode<"RISCVISD::VFSLIDE1DOWN_VL", SDTRVVFSlide1, [
foreach vti = AllIntegerVectors in {
let Predicates = GetVTypePredicates<vti>.Predicates in {
- def : Pat<(vti.Vector (riscv_vid_vl (vti.Mask V0),
+ def : Pat<(vti.Vector (riscv_vid_vl (vti.Mask VMV0:$vm),
VLOpFrag)),
(!cast<Instruction>("PseudoVID_V_"#vti.LMul.MX#"_MASK")
- (vti.Vector (IMPLICIT_DEF)), (vti.Mask V0), GPR:$vl, vti.Log2SEW,
+ (vti.Vector (IMPLICIT_DEF)), (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW,
TAIL_AGNOSTIC)>;
}
}
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td
index 1ffa78a28d09..c9ee9287de8f 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td
@@ -534,12 +534,12 @@ multiclass VPatUnaryVL_V<SDPatternOperator op, string instruction_name,
GetVTypePredicates<vti>.Predicates) in {
def : Pat<(vti.Vector (op (vti.Vector vti.RegClass:$rs1),
(vti.Vector vti.RegClass:$merge),
- (vti.Mask V0),
+ (vti.Mask VMV0:$vm),
VLOpFrag)),
(!cast<Instruction>(instruction_name#"_V_"#vti.LMul.MX#"_MASK")
vti.RegClass:$merge,
vti.RegClass:$rs1,
- (vti.Mask V0),
+ (vti.Mask VMV0:$vm),
GPR:$vl,
vti.Log2SEW,
TAIL_AGNOSTIC)>;
@@ -554,17 +554,17 @@ foreach vti = AllIntegerVectors in {
(vti.Vector vti.RegClass:$rs1),
(riscv_splat_vector -1),
(vti.Vector vti.RegClass:$merge),
- (vti.Mask V0),
+ (vti.Mask VMV0:$vm),
VLOpFrag),
(vti.Vector vti.RegClass:$rs2),
(vti.Vector vti.RegClass:$merge),
- (vti.Mask V0),
+ (vti.Mask VMV0:$vm),
VLOpFrag)),
(!cast<Instruction>("PseudoVANDN_VV_"#vti.LMul.MX#"_MASK")
vti.RegClass:$merge,
vti.RegClass:$rs2,
vti.RegClass:$rs1,
- (vti.Mask V0),
+ (vti.Mask VMV0:$vm),
GPR:$vl,
vti.Log2SEW,
TAIL_AGNOSTIC)>;
@@ -573,13 +573,13 @@ foreach vti = AllIntegerVectors in {
(not vti.ScalarRegClass:$rs1)),
(vti.Vector vti.RegClass:$rs2),
(vti.Vector vti.RegClass:$merge),
- (vti.Mask V0),
+ (vti.Mask VMV0:$vm),
VLOpFrag)),
(!cast<Instruction>("PseudoVANDN_VX_"#vti.LMul.MX#"_MASK")
vti.RegClass:$merge,
vti.RegClass:$rs2,
vti.ScalarRegClass:$rs1,
- (vti.Mask V0),
+ (vti.Mask VMV0:$vm),
GPR:$vl,
vti.Log2SEW,
TAIL_AGNOSTIC)>;
@@ -601,12 +601,12 @@ foreach vti = AllIntegerVectors in {
def : Pat<(riscv_rotl_vl vti.RegClass:$rs2,
(vti.Vector (SplatPat_uimm6 uimm6:$rs1)),
(vti.Vector vti.RegClass:$merge),
- (vti.Mask V0), VLOpFrag),
+ (vti.Mask VMV0:$vm), VLOpFrag),
(!cast<Instruction>("PseudoVROR_VI_"#vti.LMul.MX#"_MASK")
vti.RegClass:$merge,
vti.RegClass:$rs2,
(!cast<SDNodeXForm>("InvRot" # vti.SEW # "Imm") uimm6:$rs1),
- (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
+ (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
}
}
defm : VPatBinaryVL_VV_VX_VI<riscv_rotr_vl, "PseudoVROR", uimm6>;
@@ -621,55 +621,55 @@ foreach vtiToWti = AllWidenableIntVectors in {
(wti.Vector (zext_oneuse (vti.Vector vti.RegClass:$rs2))),
(wti.Vector (ext_oneuse (vti.Vector vti.RegClass:$rs1))),
(wti.Vector wti.RegClass:$merge),
- (vti.Mask V0), VLOpFrag),
+ (vti.Mask VMV0:$vm), VLOpFrag),
(!cast<Instruction>("PseudoVWSLL_VV_"#vti.LMul.MX#"_MASK")
wti.RegClass:$merge, vti.RegClass:$rs2, vti.RegClass:$rs1,
- (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
+ (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
def : Pat<(riscv_shl_vl
(wti.Vector (zext_oneuse (vti.Vector vti.RegClass:$rs2))),
(wti.Vector (Low8BitsSplatPat (XLenVT GPR:$rs1))),
(wti.Vector wti.RegClass:$merge),
- (vti.Mask V0), VLOpFrag),
+ (vti.Mask VMV0:$vm), VLOpFrag),
(!cast<Instruction>("PseudoVWSLL_VX_"#vti.LMul.MX#"_MASK")
wti.RegClass:$merge, vti.RegClass:$rs2, GPR:$rs1,
- (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
+ (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
def : Pat<(riscv_shl_vl
(wti.Vector (zext_oneuse (vti.Vector vti.RegClass:$rs2))),
(wti.Vector (SplatPat_uimm5 uimm5:$rs1)),
(wti.Vector wti.RegClass:$merge),
- (vti.Mask V0), VLOpFrag),
+ (vti.Mask VMV0:$vm), VLOpFrag),
(!cast<Instruction>("PseudoVWSLL_VI_"#vti.LMul.MX#"_MASK")
wti.RegClass:$merge, vti.RegClass:$rs2, uimm5:$rs1,
- (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
+ (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
def : Pat<(riscv_vwsll_vl
(vti.Vector vti.RegClass:$rs2),
(vti.Vector vti.RegClass:$rs1),
(wti.Vector wti.RegClass:$merge),
- (vti.Mask V0), VLOpFrag),
+ (vti.Mask VMV0:$vm), VLOpFrag),
(!cast<Instruction>("PseudoVWSLL_VV_"#vti.LMul.MX#"_MASK")
wti.RegClass:$merge, vti.RegClass:$rs2, vti.RegClass:$rs1,
- (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
+ (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
def : Pat<(riscv_vwsll_vl
(vti.Vector vti.RegClass:$rs2),
(vti.Vector (Low8BitsSplatPat (XLenVT GPR:$rs1))),
(wti.Vector wti.RegClass:$merge),
- (vti.Mask V0), VLOpFrag),
+ (vti.Mask VMV0:$vm), VLOpFrag),
(!cast<Instruction>("PseudoVWSLL_VX_"#vti.LMul.MX#"_MASK")
wti.RegClass:$merge, vti.RegClass:$rs2, GPR:$rs1,
- (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
+ (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
def : Pat<(riscv_vwsll_vl
(vti.Vector vti.RegClass:$rs2),
(vti.Vector (SplatPat_uimm5 uimm5:$rs1)),
(wti.Vector wti.RegClass:$merge),
- (vti.Mask V0), VLOpFrag),
+ (vti.Mask VMV0:$vm), VLOpFrag),
(!cast<Instruction>("PseudoVWSLL_VI_"#vti.LMul.MX#"_MASK")
wti.RegClass:$merge, vti.RegClass:$rs2, uimm5:$rs1,
- (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
+ (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
}
}
@@ -811,12 +811,12 @@ multiclass VPatBinaryV_VI_VROL<string intrinsic, string instruction,
def : Pat<(vti.Vector (IntrMask (vti.Vector vti.RegClass:$merge),
(vti.Vector vti.RegClass:$rs2),
(XLenVT uimm6:$rs1),
- (vti.Mask V0),
+ (vti.Mask VMV0:$vm),
VLOpFrag, (XLenVT timm:$policy))),
(PseudoMask (vti.Vector vti.RegClass:$merge),
(vti.Vector vti.RegClass:$rs2),
(InvRot64Imm uimm6:$rs1),
- (vti.Mask V0),
+ (vti.Mask VMV0:$vm),
GPR:$vl, vti.Log2SEW, (XLenVT timm:$policy))>;
}
}
diff --git a/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp b/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
index 475ccc01df1f..379bf670878e 100644
--- a/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
@@ -523,6 +523,18 @@ bool RISCVRegisterInfo::requiresVirtualBaseRegisters(
return true;
}
+bool RISCVRegisterInfo::shouldCoalesce(MachineInstr *MI,
+ const TargetRegisterClass *SrcRC,
+ unsigned SubReg,
+ const TargetRegisterClass *DstRC,
+ unsigned DstSubReg,
+ const TargetRegisterClass *NewRC,
+ LiveIntervals &LIS) const {
+ // if ((DstRC == &RISCV::VMV0RegClass && SrcRC != &RISCV::VMV0RegClass) || (SrcRC == &RISCV::VMV0RegClass && DstRC != &RISCV::VMV0RegClass))
+ // assert(false);
+ return true;
+}
+
// Returns true if the instruction's frame index reference would be better
// served by a base register other than FP or SP.
// Used by LocalStackSlotAllocation pass to determine which frame index
diff --git a/llvm/lib/Target/RISCV/RISCVRegisterInfo.h b/llvm/lib/Target/RISCV/RISCVRegisterInfo.h
index 8b729caa5f71..ad67f229a773 100644
--- a/llvm/lib/Target/RISCV/RISCVRegisterInfo.h
+++ b/llvm/lib/Target/RISCV/RISCVRegisterInfo.h
@@ -51,6 +51,11 @@ struct RISCVRegisterInfo : public RISCVGenRegisterInfo {
unsigned FIOperandNum,
RegScavenger *RS = nullptr) const override;
+ bool shouldCoalesce(MachineInstr *MI, const TargetRegisterClass *SrcRC,
+ unsigned SubReg, const TargetRegisterClass *DstRC,
+ unsigned DstSubReg, const TargetRegisterClass *NewRC,
+ LiveIntervals &LIS) const override;
+
bool requiresVirtualBaseRegisters(const MachineFunction &MF) const override;
bool needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const override;
```
</p>
</details>
https://github.com/llvm/llvm-project/pull/88496
More information about the llvm-commits
mailing list