[llvm] [RISCV] Rename merge operand -> passthru. NFC (PR #100330)
Luke Lau via llvm-commits
llvm-commits at lists.llvm.org
Sun Jul 28 22:05:41 PDT 2024
https://github.com/lukel97 updated https://github.com/llvm/llvm-project/pull/100330
From c3d7f90cd7fc04c58389660266c308dcb91f6d7a Mon Sep 17 00:00:00 2001
From: Luke Lau <luke at igalia.com>
Date: Wed, 24 Jul 2024 16:55:50 +0800
Subject: [PATCH 1/2] [RISCV] Rename merge operand -> passthru. NFC
We sometimes call the first tied dest operand in vector pseudos the merge operand, and other times the passthru.
Passthru seems to be more common, and it's the name the C intrinsics use[^1], so this renames all usages of merge to passthru for consistency. It also helps prevent confusion with vmerge.vvm in some of the peephole optimisations.
[^1]: https://github.com/riscv-non-isa/rvv-intrinsic-doc/blob/main/doc/rvv-intrinsic-spec.adoc#the-passthrough-vd-argument-in-the-intrinsics
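For reference, the passthru is the vd argument that the tail-undisturbed C intrinsics take as their first parameter. A minimal sketch, assuming the standard riscv_vector.h intrinsics (add_first_n is a made-up name for illustration):

    #include <riscv_vector.h>

    // In the _tu (tail-undisturbed) intrinsics, the first argument vd is the
    // passthru: lanes [0, vl) get vs2 + vs1, while lanes [vl, VLMAX) keep
    // vd's old value rather than being clobbered.
    vint32m1_t add_first_n(vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1,
                           size_t vl) {
      return __riscv_vadd_vv_i32m1_tu(vd, vs2, vs1, vl);
    }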
---
llvm/lib/Target/RISCV/RISCVAsmPrinter.cpp | 2 +-
llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp | 38 +-
llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 46 +-
llvm/lib/Target/RISCV/RISCVISelLowering.h | 6 +-
llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp | 14 +-
llvm/lib/Target/RISCV/RISCVInstrInfoV.td | 36 +-
.../Target/RISCV/RISCVInstrInfoVPseudos.td | 482 +++++++++---------
.../Target/RISCV/RISCVInstrInfoVVLPatterns.td | 242 ++++-----
llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td | 100 ++--
llvm/lib/Target/RISCV/RISCVSchedSiFive7.td | 6 +-
llvm/lib/Target/RISCV/RISCVSchedSiFiveP600.td | 6 +-
llvm/lib/Target/RISCV/RISCVScheduleV.td | 12 +-
llvm/lib/Target/RISCV/RISCVVectorPeephole.cpp | 13 +-
.../RISCV/rvv/vleff-vlseg2ff-output.ll | 4 +-
14 files changed, 504 insertions(+), 503 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVAsmPrinter.cpp b/llvm/lib/Target/RISCV/RISCVAsmPrinter.cpp
index c0a4d0e9c520f..d9a6840a13aa8 100644
--- a/llvm/lib/Target/RISCV/RISCVAsmPrinter.cpp
+++ b/llvm/lib/Target/RISCV/RISCVAsmPrinter.cpp
@@ -975,7 +975,7 @@ static bool lowerRISCVVMachineInstrToMCInst(const MachineInstr *MI,
if (hasVLOutput && OpNo == 1)
continue;
- // Skip merge op. It should be the first operand after the defs.
+ // Skip passthru op. It should be the first operand after the defs.
if (OpNo == MI->getNumExplicitDefs() && MO.isReg() && MO.isTied()) {
assert(MCID.getOperandConstraint(OpNo, MCOI::TIED_TO) == 0 &&
"Expected tied to first def.");
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
index eef6ae677ac85..2114bbe6d799f 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -3621,7 +3621,7 @@ bool RISCVDAGToDAGISel::doPeepholeMaskedRVV(MachineSDNode *N) {
#endif
SmallVector<SDValue, 8> Ops;
- // Skip the merge operand at index 0 if !UseTUPseudo.
+ // Skip the passthru operand at index 0 if !UseTUPseudo.
for (unsigned I = !UseTUPseudo, E = N->getNumOperands(); I != E; I++) {
// Skip the mask, and the Glue.
SDValue Op = N->getOperand(I);
@@ -3684,9 +3684,9 @@ static unsigned GetVMSetForLMul(RISCVII::VLMUL LMUL) {
// ->
// %x = PseudoVADD_VV_MASK %false, ..., %mask
//
-// We can only fold if vmerge's merge operand, vmerge's false operand and
-// %true's merge operand (if it has one) are the same. This is because we have
-// to consolidate them into one merge operand in the result.
+// We can only fold if vmerge's passthru operand, vmerge's false operand and
+// %true's passthru operand (if it has one) are the same. This is because we
+// have to consolidate them into one passthru operand in the result.
//
// If %true is masked, then we can use its mask instead of vmerge's if vmerge's
// mask is all ones.
@@ -3697,12 +3697,12 @@ static unsigned GetVMSetForLMul(RISCVII::VLMUL LMUL) {
// The resulting VL is the minimum of the two VLs.
//
// The resulting policy is the effective policy the vmerge would have had,
-// i.e. whether or not it's merge operand was implicit-def.
+// i.e. whether or not its passthru operand was implicit-def.
bool RISCVDAGToDAGISel::performCombineVMergeAndVOps(SDNode *N) {
- SDValue Merge, False, True, VL, Mask, Glue;
+ SDValue Passthru, False, True, VL, Mask, Glue;
// A vmv.v.v is equivalent to a vmerge with an all-ones mask.
if (IsVMv(N)) {
- Merge = N->getOperand(0);
+ Passthru = N->getOperand(0);
False = N->getOperand(0);
True = N->getOperand(1);
VL = N->getOperand(2);
@@ -3710,7 +3710,7 @@ bool RISCVDAGToDAGISel::performCombineVMergeAndVOps(SDNode *N) {
// mask later below.
} else {
assert(IsVMerge(N));
- Merge = N->getOperand(0);
+ Passthru = N->getOperand(0);
False = N->getOperand(1);
True = N->getOperand(2);
Mask = N->getOperand(3);
@@ -3721,9 +3721,9 @@ bool RISCVDAGToDAGISel::performCombineVMergeAndVOps(SDNode *N) {
assert(!Mask || cast<RegisterSDNode>(Mask)->getReg() == RISCV::V0);
assert(!Glue || Glue.getValueType() == MVT::Glue);
- // We require that either merge and false are the same, or that merge
+ // We require that either passthru and false are the same, or that passthru
// is undefined.
- if (Merge != False && !isImplicitDef(Merge))
+ if (Passthru != False && !isImplicitDef(Passthru))
return false;
assert(True.getResNo() == 0 &&
@@ -3753,11 +3753,11 @@ bool RISCVDAGToDAGISel::performCombineVMergeAndVOps(SDNode *N) {
if (!Info)
return false;
- // If True has a merge operand then it needs to be the same as vmerge's False,
- // since False will be used for the result's merge operand.
+ // If True has a passthru operand then it needs to be the same as vmerge's
+ // False, since False will be used for the result's passthru operand.
if (HasTiedDest && !isImplicitDef(True->getOperand(0))) {
- SDValue MergeOpTrue = True->getOperand(0);
- if (False != MergeOpTrue)
+ SDValue PassthruOpTrue = True->getOperand(0);
+ if (False != PassthruOpTrue)
return false;
}
@@ -3765,7 +3765,7 @@ bool RISCVDAGToDAGISel::performCombineVMergeAndVOps(SDNode *N) {
// 1s mask, since we're going to keep the mask from True.
if (IsMasked && Mask) {
// FIXME: Support mask agnostic True instruction which would have an
- // undef merge operand.
+ // undef passthru operand.
SDValue TrueMask =
getMaskSetter(True->getOperand(Info->MaskOpIdx),
True->getOperand(True->getNumOperands() - 1));
@@ -3823,8 +3823,8 @@ bool RISCVDAGToDAGISel::performCombineVMergeAndVOps(SDNode *N) {
return CLHS->getZExtValue() <= CRHS->getZExtValue() ? LHS : RHS;
};
- // Because N and True must have the same merge operand (or True's operand is
- // implicit_def), the "effective" body is the minimum of their VLs.
+ // Because N and True must have the same passthru operand (or True's operand
+ // is implicit_def), the "effective" body is the minimum of their VLs.
SDValue OrigVL = VL;
VL = GetMinVL(TrueVL, VL);
if (!VL)
@@ -3883,7 +3883,7 @@ bool RISCVDAGToDAGISel::performCombineVMergeAndVOps(SDNode *N) {
"Expected instructions with mask have a tied dest.");
#endif
- // Use a tumu policy, relaxing it to tail agnostic provided that the merge
+ // Use a tumu policy, relaxing it to tail agnostic provided that the passthru
// operand is undefined.
//
// However, if the VL became smaller than what the vmerge had originally, then
@@ -3891,7 +3891,7 @@ bool RISCVDAGToDAGISel::performCombineVMergeAndVOps(SDNode *N) {
// to the tail. In that case we always need to use tail undisturbed to
// preserve them.
bool MergeVLShrunk = VL != OrigVL;
- uint64_t Policy = (isImplicitDef(Merge) && !MergeVLShrunk)
+ uint64_t Policy = (isImplicitDef(Passthru) && !MergeVLShrunk)
? RISCVII::TAIL_AGNOSTIC
: /*TUMU*/ 0;
SDValue PolicyOp =
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 9ce669a3122f5..e565b3a7f0215 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -3293,25 +3293,25 @@ static SDValue lowerVectorXRINT(SDValue Op, SelectionDAG &DAG,
static SDValue
getVSlidedown(SelectionDAG &DAG, const RISCVSubtarget &Subtarget,
- const SDLoc &DL, EVT VT, SDValue Merge, SDValue Op,
+ const SDLoc &DL, EVT VT, SDValue Passthru, SDValue Op,
SDValue Offset, SDValue Mask, SDValue VL,
unsigned Policy = RISCVII::TAIL_UNDISTURBED_MASK_UNDISTURBED) {
- if (Merge.isUndef())
+ if (Passthru.isUndef())
Policy = RISCVII::TAIL_AGNOSTIC | RISCVII::MASK_AGNOSTIC;
SDValue PolicyOp = DAG.getTargetConstant(Policy, DL, Subtarget.getXLenVT());
- SDValue Ops[] = {Merge, Op, Offset, Mask, VL, PolicyOp};
+ SDValue Ops[] = {Passthru, Op, Offset, Mask, VL, PolicyOp};
return DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, VT, Ops);
}
static SDValue
getVSlideup(SelectionDAG &DAG, const RISCVSubtarget &Subtarget, const SDLoc &DL,
- EVT VT, SDValue Merge, SDValue Op, SDValue Offset, SDValue Mask,
+ EVT VT, SDValue Passthru, SDValue Op, SDValue Offset, SDValue Mask,
SDValue VL,
unsigned Policy = RISCVII::TAIL_UNDISTURBED_MASK_UNDISTURBED) {
- if (Merge.isUndef())
+ if (Passthru.isUndef())
Policy = RISCVII::TAIL_AGNOSTIC | RISCVII::MASK_AGNOSTIC;
SDValue PolicyOp = DAG.getTargetConstant(Policy, DL, Subtarget.getXLenVT());
- SDValue Ops[] = {Merge, Op, Offset, Mask, VL, PolicyOp};
+ SDValue Ops[] = {Passthru, Op, Offset, Mask, VL, PolicyOp};
return DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, VT, Ops);
}
@@ -6079,8 +6079,8 @@ static unsigned getRISCVVLOp(SDValue Op) {
#undef VP_CASE
}
-/// Return true if a RISC-V target specified op has a merge operand.
-static bool hasMergeOp(unsigned Opcode) {
+/// Return true if a RISC-V target specific op has a passthru operand.
+static bool hasPassthruOp(unsigned Opcode) {
assert(Opcode > RISCVISD::FIRST_NUMBER &&
Opcode <= RISCVISD::LAST_RISCV_STRICTFP_OPCODE &&
"not a RISC-V target specific op");
@@ -10945,7 +10945,7 @@ SDValue RISCVTargetLowering::lowerVectorStrictFSetcc(SDValue Op,
True, VL});
Mask =
DAG.getNode(RISCVISD::VMAND_VL, DL, MaskVT, OrderMask1, OrderMask2, VL);
- // Use Mask as the merge operand to let the result be 0 if either of the
+ // Use Mask as the passthru operand to let the result be 0 if either of the
// inputs is unordered.
Res = DAG.getNode(RISCVISD::STRICT_FSETCCS_VL, DL,
DAG.getVTList(MaskVT, MVT::Other),
@@ -11050,7 +11050,7 @@ SDValue RISCVTargetLowering::lowerFixedLengthVectorSelectToRVV(
SDValue RISCVTargetLowering::lowerToScalableOp(SDValue Op,
SelectionDAG &DAG) const {
unsigned NewOpc = getRISCVVLOp(Op);
- bool HasMergeOp = hasMergeOp(NewOpc);
+ bool HasPassthruOp = hasPassthruOp(NewOpc);
bool HasMask = hasMaskOp(NewOpc);
MVT VT = Op.getSimpleValueType();
@@ -11075,7 +11075,7 @@ SDValue RISCVTargetLowering::lowerToScalableOp(SDValue Op,
SDLoc DL(Op);
auto [Mask, VL] = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
- if (HasMergeOp)
+ if (HasPassthruOp)
Ops.push_back(DAG.getUNDEF(ContainerVT));
if (HasMask)
Ops.push_back(Mask);
@@ -11103,7 +11103,7 @@ SDValue RISCVTargetLowering::lowerToScalableOp(SDValue Op,
// types.
SDValue RISCVTargetLowering::lowerVPOp(SDValue Op, SelectionDAG &DAG) const {
unsigned RISCVISDOpc = getRISCVVLOp(Op);
- bool HasMergeOp = hasMergeOp(RISCVISDOpc);
+ bool HasPassthruOp = hasPassthruOp(RISCVISDOpc);
SDLoc DL(Op);
MVT VT = Op.getSimpleValueType();
@@ -11116,9 +11116,9 @@ SDValue RISCVTargetLowering::lowerVPOp(SDValue Op, SelectionDAG &DAG) const {
for (const auto &OpIdx : enumerate(Op->ops())) {
SDValue V = OpIdx.value();
assert(!isa<VTSDNode>(V) && "Unexpected VTSDNode node!");
- // Add dummy merge value before the mask. Or if there isn't a mask, before
- // EVL.
- if (HasMergeOp) {
+ // Add dummy passthru value before the mask. Or if there isn't a mask,
+ // before EVL.
+ if (HasPassthruOp) {
auto MaskIdx = ISD::getVPMaskIdx(Op.getOpcode());
if (MaskIdx) {
if (*MaskIdx == OpIdx.index())
@@ -14650,25 +14650,25 @@ struct CombineResult {
/// The actual replacement is *not* done in that method.
SDValue materialize(SelectionDAG &DAG,
const RISCVSubtarget &Subtarget) const {
- SDValue Mask, VL, Merge;
+ SDValue Mask, VL, Passthru;
std::tie(Mask, VL) =
NodeExtensionHelper::getMaskAndVL(Root, DAG, Subtarget);
switch (Root->getOpcode()) {
default:
- Merge = Root->getOperand(2);
+ Passthru = Root->getOperand(2);
break;
case ISD::ADD:
case ISD::SUB:
case ISD::MUL:
case ISD::OR:
case ISD::SHL:
- Merge = DAG.getUNDEF(Root->getValueType(0));
+ Passthru = DAG.getUNDEF(Root->getValueType(0));
break;
}
return DAG.getNode(TargetOpcode, SDLoc(Root), Root->getValueType(0),
LHS.getOrCreateExtendedOp(Root, DAG, Subtarget, LHSExt),
RHS.getOrCreateExtendedOp(Root, DAG, Subtarget, RHSExt),
- Merge, Mask, VL);
+ Passthru, Mask, VL);
}
};
@@ -16151,8 +16151,8 @@ static SDValue combineToVWMACC(SDNode *N, SelectionDAG &DAG,
SDValue MulOp = N->getOperand(1);
if (N->getOpcode() == RISCVISD::ADD_VL) {
- SDValue AddMergeOp = N->getOperand(2);
- if (!AddMergeOp.isUndef())
+ SDValue AddPassthruOp = N->getOperand(2);
+ if (!AddPassthruOp.isUndef())
return SDValue();
}
@@ -16173,9 +16173,9 @@ static SDValue combineToVWMACC(SDNode *N, SelectionDAG &DAG,
if (!IsVWMulOpc(MulOp.getOpcode()))
return SDValue();
- SDValue MulMergeOp = MulOp.getOperand(2);
+ SDValue MulPassthruOp = MulOp.getOperand(2);
- if (!MulMergeOp.isUndef())
+ if (!MulPassthruOp.isUndef())
return SDValue();
auto [AddMask, AddVL] = [](SDNode *N, SelectionDAG &DAG,
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
index 498c77f1875ed..d1d0760d8ffd1 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -238,7 +238,7 @@ enum NodeType : unsigned {
VECREDUCE_FMIN_VL,
VECREDUCE_FMAX_VL,
- // Vector binary ops with a merge as a third operand, a mask as a fourth
+ // Vector binary ops with a passthru as a third operand, a mask as a fourth
// operand, and VL as a fifth operand.
ADD_VL,
AND_VL,
@@ -294,7 +294,7 @@ enum NodeType : unsigned {
FABS_VL,
FSQRT_VL,
FCLASS_VL,
- FCOPYSIGN_VL, // Has a merge operand
+ FCOPYSIGN_VL, // Has a passthru operand
VFCVT_RTZ_X_F_VL,
VFCVT_RTZ_XU_F_VL,
VFCVT_X_F_VL,
@@ -322,7 +322,7 @@ enum NodeType : unsigned {
VFWMSUB_VL,
VFWNMSUB_VL,
- // Widening instructions with a merge value a third operand, a mask as a
+// Widening instructions with a passthru value as a third operand, a mask as a
// fourth operand, and VL as a fifth operand.
VWMUL_VL,
VWMULU_VL,
diff --git a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
index 96250b9c03b79..7b79026d30807 100644
--- a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
@@ -182,7 +182,7 @@ static bool isMaskRegOp(const MachineInstr &MI) {
/// Note that this is different from "agnostic" as defined by the vector
/// specification. Agnostic requires each lane to either be undisturbed, or
/// take the value -1; no other value is allowed.
-static bool hasUndefinedMergeOp(const MachineInstr &MI) {
+static bool hasUndefinedPassthru(const MachineInstr &MI) {
unsigned UseOpIdx;
if (!MI.isRegTiedToUseOperand(0, &UseOpIdx))
@@ -443,13 +443,13 @@ DemandedFields getDemanded(const MachineInstr &MI, const RISCVSubtarget *ST) {
Res.LMUL = DemandedFields::LMULNone;
Res.SEWLMULRatio = false;
Res.VLAny = false;
- // For vmv.s.x and vfmv.s.f, if the merge operand is *undefined*, we don't
+ // For vmv.s.x and vfmv.s.f, if the passthru is *undefined*, we don't
// need to preserve any other bits and are thus compatible with any larger,
// etype and can disregard policy bits. Warning: It's tempting to try doing
// this for any tail agnostic operation, but we can't as TA requires
// tail lanes to either be the original value or -1. We are writing
// unknown bits to the lanes here.
- if (hasUndefinedMergeOp(MI)) {
+ if (hasUndefinedPassthru(MI)) {
if (isFloatScalarMoveOrScalarSplatInstr(MI) && !ST->hasVInstructionsF64())
Res.SEW = DemandedFields::SEWGreaterThanOrEqualAndLessThan64;
else
@@ -469,7 +469,7 @@ DemandedFields getDemanded(const MachineInstr &MI, const RISCVSubtarget *ST) {
if (RISCVII::hasVLOp(MI.getDesc().TSFlags)) {
const MachineOperand &VLOp = MI.getOperand(getVLOpNum(MI));
- // A slidedown/slideup with an *undefined* merge op can freely clobber
+ // A slidedown/slideup with an *undefined* passthru can freely clobber
// elements not copied from the source vector (e.g. masked off, tail, or
// slideup's prefix). Notes:
// * We can't modify SEW here since the slide amount is in units of SEW.
@@ -478,7 +478,7 @@ DemandedFields getDemanded(const MachineInstr &MI, const RISCVSubtarget *ST) {
// * The LMUL1 restriction is for machines whose latency may depend on VL.
// * As above, this is only legal for tail "undefined" not "agnostic".
if (isVSlideInstr(MI) && VLOp.isImm() && VLOp.getImm() == 1 &&
- hasUndefinedMergeOp(MI)) {
+ hasUndefinedPassthru(MI)) {
Res.VLAny = false;
Res.VLZeroness = true;
Res.LMUL = DemandedFields::LMULLessThanOrEqualToM1;
@@ -492,7 +492,7 @@ DemandedFields getDemanded(const MachineInstr &MI, const RISCVSubtarget *ST) {
// careful to not increase the number of active vector registers (unlike for
// vmv.s.x.)
if (isScalarSplatInstr(MI) && VLOp.isImm() && VLOp.getImm() == 1 &&
- hasUndefinedMergeOp(MI)) {
+ hasUndefinedPassthru(MI)) {
Res.LMUL = DemandedFields::LMULLessThanOrEqualToM1;
Res.SEWLMULRatio = false;
Res.VLAny = false;
@@ -1000,7 +1000,7 @@ RISCVInsertVSETVLI::computeInfoForInstr(const MachineInstr &MI) const {
bool TailAgnostic = true;
bool MaskAgnostic = true;
- if (!hasUndefinedMergeOp(MI)) {
+ if (!hasUndefinedPassthru(MI)) {
// Start with undisturbed.
TailAgnostic = false;
MaskAgnostic = false;
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoV.td b/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
index b5817237b7fd2..5580504061637 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
@@ -92,34 +92,34 @@ def simm5_plus1_nonzero : ImmLeaf<XLenVT,
//===----------------------------------------------------------------------===//
// Common class of scheduling definitions.
-// `ReadVMergeOp` will be prepended to reads if instruction is masked.
+// `ReadVPassthru` will be prepended to reads if instruction is masked.
// `ReadVMask` will be appended to reads if instruction is masked.
// Operands:
// `writes` SchedWrites that are listed for each explicit def operand
// in order.
// `reads` SchedReads that are listed for each explicit use operand.
// `forceMasked` Forced to be masked (e.g. Add-with-Carry Instructions).
-// `forceMergeOpRead` Force to have read for merge operand.
+// `forcePassthruRead` Force a read for the passthru operand.
class SchedCommon<list<SchedWrite> writes, list<SchedRead> reads,
string mx = "WorstCase", int sew = 0, bit forceMasked = 0,
- bit forceMergeOpRead = 0> : Sched<[]> {
+ bit forcePassthruRead = 0> : Sched<[]> {
defvar isMasked = !ne(!find(NAME, "_MASK"), -1);
defvar isMaskedOrForceMasked = !or(forceMasked, isMasked);
- defvar mergeRead = !if(!or(!eq(mx, "WorstCase"), !eq(sew, 0)),
- !cast<SchedRead>("ReadVMergeOp_" # mx),
- !cast<SchedRead>("ReadVMergeOp_" # mx # "_E" #sew));
- defvar needsMergeRead = !or(isMaskedOrForceMasked, forceMergeOpRead);
+ defvar passthruRead = !if(!or(!eq(mx, "WorstCase"), !eq(sew, 0)),
+ !cast<SchedRead>("ReadVPassthru_" # mx),
+ !cast<SchedRead>("ReadVPassthru_" # mx # "_E" #sew));
+ defvar needsPassthruRead = !or(isMaskedOrForceMasked, forcePassthruRead);
defvar readsWithMask =
!if(isMaskedOrForceMasked, !listconcat(reads, [ReadVMask]), reads);
defvar allReads =
- !if(needsMergeRead, !listconcat([mergeRead], readsWithMask), reads);
+ !if(needsPassthruRead, !listconcat([passthruRead], readsWithMask), reads);
let SchedRW = !listconcat(writes, allReads);
}
// Common class of scheduling definitions for n-ary instructions.
// The scheduling resources are relevant to LMUL and may be relevant to SEW.
class SchedNary<string write, list<string> reads, string mx, int sew = 0,
- bit forceMasked = 0, bit forceMergeOpRead = 0>
+ bit forceMasked = 0, bit forcePassthruRead = 0>
: SchedCommon<[!cast<SchedWrite>(
!if(sew,
write # "_" # mx # "_E" # sew,
@@ -127,7 +127,7 @@ class SchedNary<string write, list<string> reads, string mx, int sew = 0,
!foreach(read, reads,
!cast<SchedRead>(!if(sew, read #"_" #mx #"_E" #sew,
read #"_" #mx))),
- mx, sew, forceMasked, forceMergeOpRead>;
+ mx, sew, forceMasked, forcePassthruRead>;
// Classes with postfix "MC" are only used in MC layer.
// For these classes, we assume that they are with the worst case costs and
@@ -135,22 +135,22 @@ class SchedNary<string write, list<string> reads, string mx, int sew = 0,
// For instructions with no operand.
class SchedNullary<string write, string mx, int sew = 0, bit forceMasked = 0,
- bit forceMergeOpRead = 0>:
- SchedNary<write, [], mx, sew, forceMasked, forceMergeOpRead>;
+ bit forcePassthruRead = 0>:
+ SchedNary<write, [], mx, sew, forceMasked, forcePassthruRead>;
class SchedNullaryMC<string write, bit forceMasked = 1>:
SchedNullary<write, "WorstCase", forceMasked=forceMasked>;
// For instructions with one operand.
class SchedUnary<string write, string read0, string mx, int sew = 0,
- bit forceMasked = 0, bit forceMergeOpRead = 0>:
- SchedNary<write, [read0], mx, sew, forceMasked, forceMergeOpRead>;
+ bit forceMasked = 0, bit forcePassthruRead = 0>:
+ SchedNary<write, [read0], mx, sew, forceMasked, forcePassthruRead>;
class SchedUnaryMC<string write, string read0, bit forceMasked = 1>:
SchedUnary<write, read0, "WorstCase", forceMasked=forceMasked>;
// For instructions with two operands.
class SchedBinary<string write, string read0, string read1, string mx,
- int sew = 0, bit forceMasked = 0, bit forceMergeOpRead = 0>
- : SchedNary<write, [read0, read1], mx, sew, forceMasked, forceMergeOpRead>;
+ int sew = 0, bit forceMasked = 0, bit forcePassthruRead = 0>
+ : SchedNary<write, [read0, read1], mx, sew, forceMasked, forcePassthruRead>;
class SchedBinaryMC<string write, string read0, string read1,
bit forceMasked = 1>:
SchedBinary<write, read0, read1, "WorstCase", forceMasked=forceMasked>;
@@ -165,9 +165,9 @@ class SchedTernaryMC<string write, string read0, string read1, string read2,
// For reduction instructions.
class SchedReduction<string write, string read, string mx, int sew,
- bit forceMergeOpRead = 0>
+ bit forcePassthruRead = 0>
: SchedCommon<[!cast<SchedWrite>(write #"_" #mx #"_E" #sew)],
- !listsplat(!cast<SchedRead>(read), 3), mx, sew, forceMergeOpRead>;
+ !listsplat(!cast<SchedRead>(read), 3), mx, sew, forcePassthruRead>;
class SchedReductionMC<string write, string readV, string readV0>:
SchedCommon<[!cast<SchedWrite>(write # "_WorstCase")],
[!cast<SchedRead>(readV), !cast<SchedRead>(readV0)],
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index b860273d639ee..5e5118affb5b3 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -777,7 +777,7 @@ class VPseudoUSLoadNoMask<VReg RetClass,
class VPseudoUSLoadMask<VReg RetClass,
int EEW> :
Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
- (ins GetVRegNoV0<RetClass>.R:$merge,
+ (ins GetVRegNoV0<RetClass>.R:$pt,
GPRMem:$rs1,
VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
RISCVVPseudo,
@@ -785,7 +785,7 @@ class VPseudoUSLoadMask<VReg RetClass,
let mayLoad = 1;
let mayStore = 0;
let hasSideEffects = 0;
- let Constraints = "$rd = $merge";
+ let Constraints = "$rd = $pt";
let HasVLOp = 1;
let HasSEWOp = 1;
let HasVecPolicyOp = 1;
@@ -811,7 +811,7 @@ class VPseudoUSLoadFFNoMask<VReg RetClass,
class VPseudoUSLoadFFMask<VReg RetClass,
int EEW> :
Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd, GPR:$vl),
- (ins GetVRegNoV0<RetClass>.R:$merge,
+ (ins GetVRegNoV0<RetClass>.R:$pt,
GPRMem:$rs1,
VMaskOp:$vm, AVL:$avl, ixlenimm:$sew, ixlenimm:$policy), []>,
RISCVVPseudo,
@@ -819,7 +819,7 @@ class VPseudoUSLoadFFMask<VReg RetClass,
let mayLoad = 1;
let mayStore = 0;
let hasSideEffects = 0;
- let Constraints = "$rd = $merge";
+ let Constraints = "$rd = $pt";
let HasVLOp = 1;
let HasSEWOp = 1;
let HasVecPolicyOp = 1;
@@ -845,7 +845,7 @@ class VPseudoSLoadNoMask<VReg RetClass,
class VPseudoSLoadMask<VReg RetClass,
int EEW> :
Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
- (ins GetVRegNoV0<RetClass>.R:$merge,
+ (ins GetVRegNoV0<RetClass>.R:$pt,
GPRMem:$rs1, GPR:$rs2,
VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
RISCVVPseudo,
@@ -853,7 +853,7 @@ class VPseudoSLoadMask<VReg RetClass,
let mayLoad = 1;
let mayStore = 0;
let hasSideEffects = 0;
- let Constraints = "$rd = $merge";
+ let Constraints = "$rd = $pt";
let HasVLOp = 1;
let HasSEWOp = 1;
let HasVecPolicyOp = 1;
@@ -890,7 +890,7 @@ class VPseudoILoadMask<VReg RetClass,
bit EarlyClobber,
int TargetConstraintType = 1> :
Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
- (ins GetVRegNoV0<RetClass>.R:$merge,
+ (ins GetVRegNoV0<RetClass>.R:$pt,
GPRMem:$rs1, IdxClass:$rs2,
VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
RISCVVPseudo,
@@ -898,7 +898,7 @@ class VPseudoILoadMask<VReg RetClass,
let mayLoad = 1;
let mayStore = 0;
let hasSideEffects = 0;
- let Constraints = !if(!eq(EarlyClobber, 1), "@earlyclobber $rd, $rd = $merge", "$rd = $merge");
+ let Constraints = !if(!eq(EarlyClobber, 1), "@earlyclobber $rd, $rd = $pt", "$rd = $pt");
let TargetOverlapConstraintType = TargetConstraintType;
let HasVLOp = 1;
let HasSEWOp = 1;
@@ -963,13 +963,13 @@ class VPseudoSStoreMask<VReg StClass,
class VPseudoNullaryNoMask<VReg RegClass> :
Pseudo<(outs RegClass:$rd),
- (ins RegClass:$merge,
+ (ins RegClass:$pt,
AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
RISCVVPseudo {
let mayLoad = 0;
let mayStore = 0;
let hasSideEffects = 0;
- let Constraints = "$rd = $merge";
+ let Constraints = "$rd = $pt";
let HasVLOp = 1;
let HasSEWOp = 1;
let HasVecPolicyOp = 1;
@@ -977,13 +977,13 @@ class VPseudoNullaryNoMask<VReg RegClass> :
class VPseudoNullaryMask<VReg RegClass> :
Pseudo<(outs GetVRegNoV0<RegClass>.R:$rd),
- (ins GetVRegNoV0<RegClass>.R:$merge,
+ (ins GetVRegNoV0<RegClass>.R:$pt,
VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
RISCVVPseudo {
let mayLoad = 0;
let mayStore = 0;
let hasSideEffects = 0;
- let Constraints ="$rd = $merge";
+ let Constraints = "$rd = $pt";
let HasVLOp = 1;
let HasSEWOp = 1;
let UsesMaskPolicy = 1;
@@ -1012,13 +1012,13 @@ class VPseudoUnaryNoMask<DAGOperand RetClass,
string Constraint = "",
int TargetConstraintType = 1> :
Pseudo<(outs RetClass:$rd),
- (ins RetClass:$merge, OpClass:$rs2,
+ (ins RetClass:$pt, OpClass:$rs2,
AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
RISCVVPseudo {
let mayLoad = 0;
let mayStore = 0;
let hasSideEffects = 0;
- let Constraints = !interleave([Constraint, "$rd = $merge"], ",");
+ let Constraints = !interleave([Constraint, "$rd = $pt"], ",");
let TargetOverlapConstraintType = TargetConstraintType;
let HasVLOp = 1;
let HasSEWOp = 1;
@@ -1046,13 +1046,13 @@ class VPseudoUnaryNoMaskRoundingMode<DAGOperand RetClass,
string Constraint = "",
int TargetConstraintType = 1> :
Pseudo<(outs RetClass:$rd),
- (ins RetClass:$merge, OpClass:$rs2, ixlenimm:$rm,
+ (ins RetClass:$pt, OpClass:$rs2, ixlenimm:$rm,
AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
RISCVVPseudo {
let mayLoad = 0;
let mayStore = 0;
let hasSideEffects = 0;
- let Constraints = !interleave([Constraint, "$rd = $merge"], ",");
+ let Constraints = !interleave([Constraint, "$rd = $pt"], ",");
let TargetOverlapConstraintType = TargetConstraintType;
let HasVLOp = 1;
let HasSEWOp = 1;
@@ -1066,13 +1066,13 @@ class VPseudoUnaryMask<VReg RetClass,
string Constraint = "",
int TargetConstraintType = 1> :
Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
- (ins GetVRegNoV0<RetClass>.R:$merge, OpClass:$rs2,
+ (ins GetVRegNoV0<RetClass>.R:$pt, OpClass:$rs2,
VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
RISCVVPseudo {
let mayLoad = 0;
let mayStore = 0;
let hasSideEffects = 0;
- let Constraints = !interleave([Constraint, "$rd = $merge"], ",");
+ let Constraints = !interleave([Constraint, "$rd = $pt"], ",");
let TargetOverlapConstraintType = TargetConstraintType;
let HasVLOp = 1;
let HasSEWOp = 1;
@@ -1085,14 +1085,14 @@ class VPseudoUnaryMaskRoundingMode<VReg RetClass,
string Constraint = "",
int TargetConstraintType = 1> :
Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
- (ins GetVRegNoV0<RetClass>.R:$merge, OpClass:$rs2,
+ (ins GetVRegNoV0<RetClass>.R:$pt, OpClass:$rs2,
VMaskOp:$vm, ixlenimm:$rm,
AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
RISCVVPseudo {
let mayLoad = 0;
let mayStore = 0;
let hasSideEffects = 0;
- let Constraints = !interleave([Constraint, "$rd = $merge"], ",");
+ let Constraints = !interleave([Constraint, "$rd = $pt"], ",");
let TargetOverlapConstraintType = TargetConstraintType;
let HasVLOp = 1;
let HasSEWOp = 1;
@@ -1106,12 +1106,12 @@ class VPseudoUnaryMask_NoExcept<VReg RetClass,
VReg OpClass,
string Constraint = ""> :
Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
- (ins GetVRegNoV0<RetClass>.R:$merge, OpClass:$rs2,
+ (ins GetVRegNoV0<RetClass>.R:$pt, OpClass:$rs2,
VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []> {
let mayLoad = 0;
let mayStore = 0;
let hasSideEffects = 0;
- let Constraints = !interleave([Constraint, "$rd = $merge"], ",");
+ let Constraints = !interleave([Constraint, "$rd = $pt"], ",");
let HasVLOp = 1;
let HasSEWOp = 1;
let HasVecPolicyOp = 1;
@@ -1124,13 +1124,13 @@ class VPseudoUnaryNoMask_FRM<VReg RetClass,
string Constraint = "",
int TargetConstraintType = 1> :
Pseudo<(outs RetClass:$rd),
- (ins RetClass:$merge, OpClass:$rs2, ixlenimm:$frm,
+ (ins RetClass:$pt, OpClass:$rs2, ixlenimm:$frm,
AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
RISCVVPseudo {
let mayLoad = 0;
let mayStore = 0;
let hasSideEffects = 0;
- let Constraints = !interleave([Constraint, "$rd = $merge"], ",");
+ let Constraints = !interleave([Constraint, "$rd = $pt"], ",");
let TargetOverlapConstraintType = TargetConstraintType;
let HasVLOp = 1;
let HasSEWOp = 1;
@@ -1143,14 +1143,14 @@ class VPseudoUnaryMask_FRM<VReg RetClass,
string Constraint = "",
int TargetConstraintType = 1> :
Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
- (ins GetVRegNoV0<RetClass>.R:$merge, OpClass:$rs2,
+ (ins GetVRegNoV0<RetClass>.R:$pt, OpClass:$rs2,
VMaskOp:$vm, ixlenimm:$frm,
AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
RISCVVPseudo {
let mayLoad = 0;
let mayStore = 0;
let hasSideEffects = 0;
- let Constraints = !interleave([Constraint, "$rd = $merge"], ",");
+ let Constraints = !interleave([Constraint, "$rd = $pt"], ",");
let TargetOverlapConstraintType = TargetConstraintType;
let HasVLOp = 1;
let HasSEWOp = 1;
@@ -1185,13 +1185,13 @@ class VPseudoUnaryMaskGPROut :
class VPseudoUnaryAnyMask<VReg RetClass,
VReg Op1Class> :
Pseudo<(outs RetClass:$rd),
- (ins RetClass:$merge, Op1Class:$rs2,
+ (ins RetClass:$pt, Op1Class:$rs2,
VR:$vm, AVL:$vl, ixlenimm:$sew), []>,
RISCVVPseudo {
let mayLoad = 0;
let mayStore = 0;
let hasSideEffects = 0;
- let Constraints = "@earlyclobber $rd, $rd = $merge";
+ let Constraints = "@earlyclobber $rd, $rd = $pt";
let HasVLOp = 1;
let HasSEWOp = 1;
}
@@ -1219,13 +1219,13 @@ class VPseudoBinaryNoMaskPolicy<VReg RetClass,
string Constraint,
int TargetConstraintType = 1> :
Pseudo<(outs RetClass:$rd),
- (ins RetClass:$merge, Op1Class:$rs2, Op2Class:$rs1, AVL:$vl,
+ (ins RetClass:$pt, Op1Class:$rs2, Op2Class:$rs1, AVL:$vl,
ixlenimm:$sew, ixlenimm:$policy), []>,
RISCVVPseudo {
let mayLoad = 0;
let mayStore = 0;
let hasSideEffects = 0;
- let Constraints = !interleave([Constraint, "$rd = $merge"], ",");
+ let Constraints = !interleave([Constraint, "$rd = $pt"], ",");
let TargetOverlapConstraintType = TargetConstraintType;
let HasVLOp = 1;
let HasSEWOp = 1;
@@ -1239,12 +1239,12 @@ class VPseudoBinaryNoMaskRoundingMode<VReg RetClass,
int UsesVXRM_ = 1,
int TargetConstraintType = 1> :
Pseudo<(outs RetClass:$rd),
- (ins RetClass:$merge, Op1Class:$rs2, Op2Class:$rs1, ixlenimm:$rm,
+ (ins RetClass:$pt, Op1Class:$rs2, Op2Class:$rs1, ixlenimm:$rm,
AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
RISCVVPseudo {
let mayLoad = 0;
let mayStore = 0;
- let Constraints = !interleave([Constraint, "$rd = $merge"], ",");
+ let Constraints = !interleave([Constraint, "$rd = $pt"], ",");
let TargetOverlapConstraintType = TargetConstraintType;
let HasVLOp = 1;
let HasSEWOp = 1;
@@ -1260,14 +1260,14 @@ class VPseudoBinaryMaskPolicyRoundingMode<VReg RetClass,
int UsesVXRM_,
int TargetConstraintType = 1> :
Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
- (ins GetVRegNoV0<RetClass>.R:$merge,
+ (ins GetVRegNoV0<RetClass>.R:$pt,
Op1Class:$rs2, Op2Class:$rs1,
VMaskOp:$vm, ixlenimm:$rm, AVL:$vl,
ixlenimm:$sew, ixlenimm:$policy), []>,
RISCVVPseudo {
let mayLoad = 0;
let mayStore = 0;
- let Constraints = !interleave([Constraint, "$rd = $merge"], ",");
+ let Constraints = !interleave([Constraint, "$rd = $pt"], ",");
let TargetOverlapConstraintType = TargetConstraintType;
let HasVLOp = 1;
let HasSEWOp = 1;
@@ -1358,14 +1358,14 @@ class VPseudoBinaryMaskPolicy<VReg RetClass,
string Constraint,
int TargetConstraintType = 1> :
Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
- (ins GetVRegNoV0<RetClass>.R:$merge,
+ (ins GetVRegNoV0<RetClass>.R:$pt,
Op1Class:$rs2, Op2Class:$rs1,
VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
RISCVVPseudo {
let mayLoad = 0;
let mayStore = 0;
let hasSideEffects = 0;
- let Constraints = !interleave([Constraint, "$rd = $merge"], ",");
+ let Constraints = !interleave([Constraint, "$rd = $pt"], ",");
let TargetOverlapConstraintType = TargetConstraintType;
let HasVLOp = 1;
let HasSEWOp = 1;
@@ -1377,14 +1377,14 @@ class VPseudoTernaryMaskPolicy<VReg RetClass,
RegisterClass Op1Class,
DAGOperand Op2Class> :
Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
- (ins GetVRegNoV0<RetClass>.R:$merge,
+ (ins GetVRegNoV0<RetClass>.R:$pt,
Op1Class:$rs2, Op2Class:$rs1,
VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
RISCVVPseudo {
let mayLoad = 0;
let mayStore = 0;
let hasSideEffects = 0;
- let Constraints = "$rd = $merge";
+ let Constraints = "$rd = $pt";
let HasVLOp = 1;
let HasSEWOp = 1;
let HasVecPolicyOp = 1;
@@ -1394,7 +1394,7 @@ class VPseudoTernaryMaskPolicyRoundingMode<VReg RetClass,
RegisterClass Op1Class,
DAGOperand Op2Class> :
Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
- (ins GetVRegNoV0<RetClass>.R:$merge,
+ (ins GetVRegNoV0<RetClass>.R:$pt,
Op1Class:$rs2, Op2Class:$rs1,
VMaskOp:$vm,
ixlenimm:$rm,
@@ -1403,7 +1403,7 @@ class VPseudoTernaryMaskPolicyRoundingMode<VReg RetClass,
let mayLoad = 0;
let mayStore = 0;
let hasSideEffects = 0;
- let Constraints = "$rd = $merge";
+ let Constraints = "$rd = $pt";
let HasVLOp = 1;
let HasSEWOp = 1;
let HasVecPolicyOp = 1;
@@ -1418,14 +1418,14 @@ class VPseudoBinaryMOutMask<VReg RetClass,
string Constraint,
int TargetConstraintType = 1> :
Pseudo<(outs RetClass:$rd),
- (ins RetClass:$merge,
+ (ins RetClass:$pt,
Op1Class:$rs2, Op2Class:$rs1,
VMaskOp:$vm, AVL:$vl, ixlenimm:$sew), []>,
RISCVVPseudo {
let mayLoad = 0;
let mayStore = 0;
let hasSideEffects = 0;
- let Constraints = !interleave([Constraint, "$rd = $merge"], ",");
+ let Constraints = !interleave([Constraint, "$rd = $pt"], ",");
let TargetOverlapConstraintType = TargetConstraintType;
let HasVLOp = 1;
let HasSEWOp = 1;
@@ -1440,14 +1440,14 @@ class VPseudoTiedBinaryMask<VReg RetClass,
string Constraint,
int TargetConstraintType = 1> :
Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
- (ins GetVRegNoV0<RetClass>.R:$merge,
+ (ins GetVRegNoV0<RetClass>.R:$pt,
Op2Class:$rs1,
VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
RISCVVPseudo {
let mayLoad = 0;
let mayStore = 0;
let hasSideEffects = 0;
- let Constraints = !interleave([Constraint, "$rd = $merge"], ",");
+ let Constraints = !interleave([Constraint, "$rd = $pt"], ",");
let TargetOverlapConstraintType = TargetConstraintType;
let HasVLOp = 1;
let HasSEWOp = 1;
@@ -1461,7 +1461,7 @@ class VPseudoTiedBinaryMaskRoundingMode<VReg RetClass,
string Constraint,
int TargetConstraintType = 1> :
Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
- (ins GetVRegNoV0<RetClass>.R:$merge,
+ (ins GetVRegNoV0<RetClass>.R:$pt,
Op2Class:$rs1,
VMaskOp:$vm,
ixlenimm:$rm,
@@ -1470,7 +1470,7 @@ class VPseudoTiedBinaryMaskRoundingMode<VReg RetClass,
let mayLoad = 0;
let mayStore = 0;
let hasSideEffects = 0;
- let Constraints = !interleave([Constraint, "$rd = $merge"], ",");
+ let Constraints = !interleave([Constraint, "$rd = $pt"], ",");
let TargetOverlapConstraintType = TargetConstraintType;
let HasVLOp = 1;
let HasSEWOp = 1;
@@ -1511,13 +1511,13 @@ class VPseudoTiedBinaryCarryIn<VReg RetClass,
LMULInfo MInfo,
int TargetConstraintType = 1> :
Pseudo<(outs RetClass:$rd),
- (ins RetClass:$merge, Op1Class:$rs2, Op2Class:$rs1,
+ (ins RetClass:$pt, Op1Class:$rs2, Op2Class:$rs1,
VMV0:$carry, AVL:$vl, ixlenimm:$sew), []>,
RISCVVPseudo {
let mayLoad = 0;
let mayStore = 0;
let hasSideEffects = 0;
- let Constraints = "$rd = $merge";
+ let Constraints = "$rd = $pt";
let TargetOverlapConstraintType = TargetConstraintType;
let HasVLOp = 1;
let HasSEWOp = 1;
@@ -1602,14 +1602,14 @@ class VPseudoUSSegLoadMask<VReg RetClass,
int EEW,
bits<4> NF> :
Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
- (ins GetVRegNoV0<RetClass>.R:$merge, GPRMem:$rs1,
+ (ins GetVRegNoV0<RetClass>.R:$pt, GPRMem:$rs1,
VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
RISCVVPseudo,
RISCVVLSEG<NF, /*Masked*/1, /*Strided*/0, /*FF*/0, !logtwo(EEW), VLMul> {
let mayLoad = 1;
let mayStore = 0;
let hasSideEffects = 0;
- let Constraints = "$rd = $merge";
+ let Constraints = "$rd = $pt";
let HasVLOp = 1;
let HasSEWOp = 1;
let HasVecPolicyOp = 1;
@@ -1637,14 +1637,14 @@ class VPseudoUSSegLoadFFMask<VReg RetClass,
int EEW,
bits<4> NF> :
Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd, GPR:$vl),
- (ins GetVRegNoV0<RetClass>.R:$merge, GPRMem:$rs1,
+ (ins GetVRegNoV0<RetClass>.R:$pt, GPRMem:$rs1,
VMaskOp:$vm, AVL:$avl, ixlenimm:$sew, ixlenimm:$policy), []>,
RISCVVPseudo,
RISCVVLSEG<NF, /*Masked*/1, /*Strided*/0, /*FF*/1, !logtwo(EEW), VLMul> {
let mayLoad = 1;
let mayStore = 0;
let hasSideEffects = 0;
- let Constraints = "$rd = $merge";
+ let Constraints = "$rd = $pt";
let HasVLOp = 1;
let HasSEWOp = 1;
let HasVecPolicyOp = 1;
@@ -1655,7 +1655,7 @@ class VPseudoSSegLoadNoMask<VReg RetClass,
int EEW,
bits<4> NF> :
Pseudo<(outs RetClass:$rd),
- (ins RetClass:$merge, GPRMem:$rs1, GPR:$offset, AVL:$vl,
+ (ins RetClass:$pt, GPRMem:$rs1, GPR:$offset, AVL:$vl,
ixlenimm:$sew, ixlenimm:$policy), []>,
RISCVVPseudo,
RISCVVLSEG<NF, /*Masked*/0, /*Strided*/1, /*FF*/0, !logtwo(EEW), VLMul> {
@@ -1665,14 +1665,14 @@ class VPseudoSSegLoadNoMask<VReg RetClass,
let HasVLOp = 1;
let HasSEWOp = 1;
let HasVecPolicyOp = 1;
- let Constraints = "$rd = $merge";
+ let Constraints = "$rd = $pt";
}
class VPseudoSSegLoadMask<VReg RetClass,
int EEW,
bits<4> NF> :
Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
- (ins GetVRegNoV0<RetClass>.R:$merge, GPRMem:$rs1,
+ (ins GetVRegNoV0<RetClass>.R:$pt, GPRMem:$rs1,
GPR:$offset, VMaskOp:$vm, AVL:$vl, ixlenimm:$sew,
ixlenimm:$policy), []>,
RISCVVPseudo,
@@ -1680,7 +1680,7 @@ class VPseudoSSegLoadMask<VReg RetClass,
let mayLoad = 1;
let mayStore = 0;
let hasSideEffects = 0;
- let Constraints = "$rd = $merge";
+ let Constraints = "$rd = $pt";
let HasVLOp = 1;
let HasSEWOp = 1;
let HasVecPolicyOp = 1;
@@ -1694,7 +1694,7 @@ class VPseudoISegLoadNoMask<VReg RetClass,
bits<4> NF,
bit Ordered> :
Pseudo<(outs RetClass:$rd),
- (ins RetClass:$merge, GPRMem:$rs1, IdxClass:$offset, AVL:$vl,
+ (ins RetClass:$pt, GPRMem:$rs1, IdxClass:$offset, AVL:$vl,
ixlenimm:$sew, ixlenimm:$policy), []>,
RISCVVPseudo,
RISCVVLXSEG<NF, /*Masked*/0, Ordered, !logtwo(EEW), VLMul, LMUL> {
@@ -1703,7 +1703,7 @@ class VPseudoISegLoadNoMask<VReg RetClass,
let hasSideEffects = 0;
// For vector indexed segment loads, the destination vector register groups
// cannot overlap the source vector register group
- let Constraints = "@earlyclobber $rd, $rd = $merge";
+ let Constraints = "@earlyclobber $rd, $rd = $pt";
let HasVLOp = 1;
let HasSEWOp = 1;
let HasVecPolicyOp = 1;
@@ -1716,7 +1716,7 @@ class VPseudoISegLoadMask<VReg RetClass,
bits<4> NF,
bit Ordered> :
Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
- (ins GetVRegNoV0<RetClass>.R:$merge, GPRMem:$rs1,
+ (ins GetVRegNoV0<RetClass>.R:$pt, GPRMem:$rs1,
IdxClass:$offset, VMaskOp:$vm, AVL:$vl, ixlenimm:$sew,
ixlenimm:$policy), []>,
RISCVVPseudo,
@@ -1726,7 +1726,7 @@ class VPseudoISegLoadMask<VReg RetClass,
let hasSideEffects = 0;
// For vector indexed segment loads, the destination vector register groups
// cannot overlap the source vector register group
- let Constraints = "@earlyclobber $rd, $rd = $merge";
+ let Constraints = "@earlyclobber $rd, $rd = $pt";
let HasVLOp = 1;
let HasSEWOp = 1;
let HasVecPolicyOp = 1;
@@ -2024,11 +2024,11 @@ multiclass VPseudoVSFS_M {
let VLMul = mti.LMul.value in {
def "_M_" # mti.BX : VPseudoUnaryNoMaskNoPolicy<VR, VR, constraint>,
SchedUnary<"WriteVMSFSV", "ReadVMSFSV", mx,
- forceMergeOpRead=true>;
+ forcePassthruRead=true>;
let ForceTailAgnostic = true in
def "_M_" # mti.BX # "_MASK" : VPseudoUnaryMask<VR, VR, constraint>,
SchedUnary<"WriteVMSFSV", "ReadVMSFSV", mx,
- forceMergeOpRead=true>;
+ forcePassthruRead=true>;
}
}
}
@@ -2038,11 +2038,11 @@ multiclass VPseudoVID_V {
defvar mx = m.MX;
let VLMul = m.value in {
def "_V_" # mx : VPseudoNullaryNoMask<m.vrclass>,
- SchedNullary<"WriteVIdxV", mx, forceMergeOpRead=true>;
+ SchedNullary<"WriteVIdxV", mx, forcePassthruRead=true>;
def "_V_" # mx # "_MASK" : VPseudoNullaryMask<m.vrclass>,
RISCVMaskedPseudo<MaskIdx=1>,
SchedNullary<"WriteVIdxV", mx,
- forceMergeOpRead=true>;
+ forcePassthruRead=true>;
}
}
}
@@ -2063,11 +2063,11 @@ multiclass VPseudoVIOTA_M {
let VLMul = m.value in {
def "_" # mx : VPseudoUnaryNoMask<m.vrclass, VR, constraint>,
SchedUnary<"WriteVIotaV", "ReadVIotaV", mx,
- forceMergeOpRead=true>;
+ forcePassthruRead=true>;
def "_" # mx # "_MASK" : VPseudoUnaryMask<m.vrclass, VR, constraint>,
RISCVMaskedPseudo<MaskIdx=2, ActiveAffectsRes=true>,
SchedUnary<"WriteVIotaV", "ReadVIotaV", mx,
- forceMergeOpRead=true>;
+ forcePassthruRead=true>;
}
}
}
@@ -2227,7 +2227,7 @@ multiclass VPseudoVGTR_EI16_VV {
: VPseudoBinaryEmul<m.vrclass, m.vrclass, emul.vrclass, m, emul,
constraint, e>,
SchedBinary<"WriteVRGatherEI16VV", "ReadVRGatherEI16VV_data",
- "ReadVRGatherEI16VV_index", mx, e, forceMergeOpRead=true>;
+ "ReadVRGatherEI16VV_index", mx, e, forcePassthruRead=true>;
}
}
}
@@ -2246,7 +2246,7 @@ multiclass VPseudoVSLD1_VX<string Constraint = ""> {
foreach m = MxList in {
defm "_VX" : VPseudoBinary<m.vrclass, m.vrclass, GPR, m, Constraint>,
SchedBinary<"WriteVISlide1X", "ReadVISlideV", "ReadVISlideX",
- m.MX, forceMergeOpRead=true>;
+ m.MX, forcePassthruRead=true>;
}
}
@@ -2267,7 +2267,7 @@ multiclass VPseudoVSLD1_VF<string Constraint = ""> {
defm "_V" #f.FX
: VPseudoBinary<m.vrclass, m.vrclass, f.fprclass, m, Constraint>,
SchedBinary<"WriteVFSlide1F", "ReadVFSlideV", "ReadVFSlideF", m.MX,
- forceMergeOpRead=true>;
+ forcePassthruRead=true>;
}
}
}
@@ -2445,7 +2445,7 @@ multiclass VPseudoVMRG_FM {
: VPseudoTiedBinaryCarryIn<GetVRegNoV0<m.vrclass>.R, m.vrclass,
f.fprclass, m>,
SchedBinary<"WriteVFMergeV", "ReadVFMergeV", "ReadVFMergeF", mx,
- forceMasked=1, forceMergeOpRead=true>;
+ forceMasked=1, forcePassthruRead=true>;
}
}
}
@@ -2472,13 +2472,13 @@ multiclass VPseudoUnaryVMV_V_X_I {
let VLMul = m.value in {
def "_V_" # mx : VPseudoUnaryNoMask<m.vrclass, m.vrclass>,
SchedUnary<"WriteVIMovV", "ReadVIMovV", mx,
- forceMergeOpRead=true>;
+ forcePassthruRead=true>;
def "_X_" # mx : VPseudoUnaryNoMask<m.vrclass, GPR>,
SchedUnary<"WriteVIMovX", "ReadVIMovX", mx,
- forceMergeOpRead=true>;
+ forcePassthruRead=true>;
def "_I_" # mx : VPseudoUnaryNoMask<m.vrclass, simm5>,
SchedNullary<"WriteVIMovI", mx,
- forceMergeOpRead=true>;
+ forcePassthruRead=true>;
}
}
}
@@ -2491,7 +2491,7 @@ multiclass VPseudoVMV_F {
let VLMul = m.value in {
def "_" # f.FX # "_" # mx :
VPseudoUnaryNoMask<m.vrclass, f.fprclass>,
- SchedUnary<"WriteVFMovV", "ReadVFMovF", mx, forceMergeOpRead=true>;
+ SchedUnary<"WriteVFMovV", "ReadVFMovF", mx, forcePassthruRead=true>;
}
}
}
@@ -2503,11 +2503,11 @@ multiclass VPseudoVCLS_V {
let VLMul = m.value in {
def "_V_" # mx : VPseudoUnaryNoMask<m.vrclass, m.vrclass>,
SchedUnary<"WriteVFClassV", "ReadVFClassV", mx,
- forceMergeOpRead=true>;
+ forcePassthruRead=true>;
def "_V_" # mx # "_MASK" : VPseudoUnaryMask<m.vrclass, m.vrclass>,
RISCVMaskedPseudo<MaskIdx=2>,
SchedUnary<"WriteVFClassV", "ReadVFClassV", mx,
- forceMergeOpRead=true>;
+ forcePassthruRead=true>;
}
}
}
@@ -2523,12 +2523,12 @@ multiclass VPseudoVSQR_V_RM {
let SEW = e in {
def "_V" # suffix : VPseudoUnaryNoMaskRoundingMode<m.vrclass, m.vrclass>,
SchedUnary<"WriteVFSqrtV", "ReadVFSqrtV", mx, e,
- forceMergeOpRead=true>;
+ forcePassthruRead=true>;
def "_V" #suffix # "_MASK"
: VPseudoUnaryMaskRoundingMode<m.vrclass, m.vrclass>,
RISCVMaskedPseudo<MaskIdx = 2>,
SchedUnary<"WriteVFSqrtV", "ReadVFSqrtV", mx, e,
- forceMergeOpRead=true>;
+ forcePassthruRead=true>;
}
}
}
@@ -2541,11 +2541,11 @@ multiclass VPseudoVRCP_V {
let VLMul = m.value in {
def "_V_" # mx # "_E" # e
: VPseudoUnaryNoMask<m.vrclass, m.vrclass>,
- SchedUnary<"WriteVFRecpV", "ReadVFRecpV", mx, e, forceMergeOpRead=true>;
+ SchedUnary<"WriteVFRecpV", "ReadVFRecpV", mx, e, forcePassthruRead=true>;
def "_V_" # mx # "_E" # e # "_MASK"
: VPseudoUnaryMask<m.vrclass, m.vrclass>,
RISCVMaskedPseudo<MaskIdx = 2>,
- SchedUnary<"WriteVFRecpV", "ReadVFRecpV", mx, e, forceMergeOpRead=true>;
+ SchedUnary<"WriteVFRecpV", "ReadVFRecpV", mx, e, forcePassthruRead=true>;
}
}
}
@@ -2558,11 +2558,11 @@ multiclass VPseudoVRCP_V_RM {
let VLMul = m.value in {
def "_V_" # mx # "_E" # e
: VPseudoUnaryNoMaskRoundingMode<m.vrclass, m.vrclass>,
- SchedUnary<"WriteVFRecpV", "ReadVFRecpV", mx, e, forceMergeOpRead=true>;
+ SchedUnary<"WriteVFRecpV", "ReadVFRecpV", mx, e, forcePassthruRead=true>;
def "_V_" # mx # "_E" # e # "_MASK"
: VPseudoUnaryMaskRoundingMode<m.vrclass, m.vrclass>,
RISCVMaskedPseudo<MaskIdx = 2>,
- SchedUnary<"WriteVFRecpV", "ReadVFRecpV", mx, e, forceMergeOpRead=true>;
+ SchedUnary<"WriteVFRecpV", "ReadVFRecpV", mx, e, forcePassthruRead=true>;
}
}
}
@@ -2575,11 +2575,11 @@ multiclass PseudoVEXT_VF2 {
defvar CurrTypeConstraints = !if(!or(!eq(mx, "MF4"), !eq(mx, "MF2"), !eq(mx, "M1")), 1, 3);
let VLMul = m.value in {
def "_" # mx : VPseudoUnaryNoMask<m.vrclass, m.f2vrclass, constraints, CurrTypeConstraints>,
- SchedUnary<"WriteVExtV", "ReadVExtV", mx, forceMergeOpRead=true>;
+ SchedUnary<"WriteVExtV", "ReadVExtV", mx, forcePassthruRead=true>;
def "_" # mx # "_MASK" :
VPseudoUnaryMask<m.vrclass, m.f2vrclass, constraints, CurrTypeConstraints>,
RISCVMaskedPseudo<MaskIdx=2>,
- SchedUnary<"WriteVExtV", "ReadVExtV", mx, forceMergeOpRead=true>;
+ SchedUnary<"WriteVExtV", "ReadVExtV", mx, forcePassthruRead=true>;
}
}
}
@@ -2591,11 +2591,11 @@ multiclass PseudoVEXT_VF4 {
defvar CurrTypeConstraints = !if(!or(!eq(mx, "MF2"), !eq(mx, "M1"), !eq(mx, "M2")), 1, 3);
let VLMul = m.value in {
def "_" # mx : VPseudoUnaryNoMask<m.vrclass, m.f4vrclass, constraints, CurrTypeConstraints>,
- SchedUnary<"WriteVExtV", "ReadVExtV", mx, forceMergeOpRead=true>;
+ SchedUnary<"WriteVExtV", "ReadVExtV", mx, forcePassthruRead=true>;
def "_" # mx # "_MASK" :
VPseudoUnaryMask<m.vrclass, m.f4vrclass, constraints, CurrTypeConstraints>,
RISCVMaskedPseudo<MaskIdx=2>,
- SchedUnary<"WriteVExtV", "ReadVExtV", mx, forceMergeOpRead=true>;
+ SchedUnary<"WriteVExtV", "ReadVExtV", mx, forcePassthruRead=true>;
}
}
}
@@ -2607,11 +2607,11 @@ multiclass PseudoVEXT_VF8 {
defvar CurrTypeConstraints = !if(!or(!eq(mx, "M1"), !eq(mx, "M2"), !eq(mx, "M4")), 1, 3);
let VLMul = m.value in {
def "_" # mx : VPseudoUnaryNoMask<m.vrclass, m.f8vrclass, constraints, CurrTypeConstraints>,
- SchedUnary<"WriteVExtV", "ReadVExtV", mx, forceMergeOpRead=true>;
+ SchedUnary<"WriteVExtV", "ReadVExtV", mx, forcePassthruRead=true>;
def "_" # mx # "_MASK" :
VPseudoUnaryMask<m.vrclass, m.f8vrclass, constraints, CurrTypeConstraints>,
RISCVMaskedPseudo<MaskIdx=2>,
- SchedUnary<"WriteVExtV", "ReadVExtV", mx, forceMergeOpRead=true>;
+ SchedUnary<"WriteVExtV", "ReadVExtV", mx, forcePassthruRead=true>;
}
}
}
@@ -2657,16 +2657,16 @@ multiclass VPseudoVGTR_VV_VX_VI {
defvar mx = m.MX;
defm "" : VPseudoBinaryV_VX<m, constraint>,
SchedBinary<"WriteVRGatherVX", "ReadVRGatherVX_data",
- "ReadVRGatherVX_index", mx, forceMergeOpRead=true>;
+ "ReadVRGatherVX_index", mx, forcePassthruRead=true>;
defm "" : VPseudoBinaryV_VI<uimm5, m, constraint>,
SchedUnary<"WriteVRGatherVI", "ReadVRGatherVI_data", mx,
- forceMergeOpRead=true>;
+ forcePassthruRead=true>;
defvar sews = SchedSEWSet<mx>.val;
foreach e = sews in {
defm "" : VPseudoBinaryV_VV<m, constraint, e>,
SchedBinary<"WriteVRGatherVV", "ReadVRGatherVV_data",
- "ReadVRGatherVV_index", mx, e, forceMergeOpRead=true>;
+ "ReadVRGatherVV_index", mx, e, forcePassthruRead=true>;
}
}
}
@@ -2676,12 +2676,12 @@ multiclass VPseudoVSALU_VV_VX_VI<bit Commutable = 0> {
defvar mx = m.MX;
defm "" : VPseudoBinaryV_VV<m, Commutable=Commutable>,
SchedBinary<"WriteVSALUV", "ReadVSALUV", "ReadVSALUX", mx,
- forceMergeOpRead=true>;
+ forcePassthruRead=true>;
defm "" : VPseudoBinaryV_VX<m>,
SchedBinary<"WriteVSALUX", "ReadVSALUV", "ReadVSALUX", mx,
- forceMergeOpRead=true>;
+ forcePassthruRead=true>;
defm "" : VPseudoBinaryV_VI<simm5, m>,
- SchedUnary<"WriteVSALUI", "ReadVSALUV", mx, forceMergeOpRead=true>;
+ SchedUnary<"WriteVSALUI", "ReadVSALUV", mx, forcePassthruRead=true>;
}
}
@@ -2691,12 +2691,12 @@ multiclass VPseudoVSHT_VV_VX_VI {
defvar mx = m.MX;
defm "" : VPseudoBinaryV_VV<m>,
SchedBinary<"WriteVShiftV", "ReadVShiftV", "ReadVShiftV", mx,
- forceMergeOpRead=true>;
+ forcePassthruRead=true>;
defm "" : VPseudoBinaryV_VX<m>,
SchedBinary<"WriteVShiftX", "ReadVShiftV", "ReadVShiftX", mx,
- forceMergeOpRead=true>;
+ forcePassthruRead=true>;
defm "" : VPseudoBinaryV_VI<uimm5, m>,
- SchedUnary<"WriteVShiftI", "ReadVShiftV", mx, forceMergeOpRead=true>;
+ SchedUnary<"WriteVShiftI", "ReadVShiftV", mx, forcePassthruRead=true>;
}
}
@@ -2705,12 +2705,12 @@ multiclass VPseudoVSSHT_VV_VX_VI_RM {
defvar mx = m.MX;
defm "" : VPseudoBinaryV_VV_RM<m>,
SchedBinary<"WriteVSShiftV", "ReadVSShiftV", "ReadVSShiftV", mx,
- forceMergeOpRead=true>;
+ forcePassthruRead=true>;
defm "" : VPseudoBinaryV_VX_RM<m>,
SchedBinary<"WriteVSShiftX", "ReadVSShiftV", "ReadVSShiftX", mx,
- forceMergeOpRead=true>;
+ forcePassthruRead=true>;
defm "" : VPseudoBinaryV_VI_RM<uimm5, m>,
- SchedUnary<"WriteVSShiftI", "ReadVSShiftV", mx, forceMergeOpRead=true>;
+ SchedUnary<"WriteVSShiftI", "ReadVSShiftV", mx, forcePassthruRead=true>;
}
}
@@ -2719,12 +2719,12 @@ multiclass VPseudoVALU_VV_VX_VI<bit Commutable = 0> {
defvar mx = m.MX;
defm "" : VPseudoBinaryV_VV<m, Commutable=Commutable>,
SchedBinary<"WriteVIALUV", "ReadVIALUV", "ReadVIALUV", mx,
- forceMergeOpRead=true>;
+ forcePassthruRead=true>;
defm "" : VPseudoBinaryV_VX<m>,
SchedBinary<"WriteVIALUX", "ReadVIALUV", "ReadVIALUX", mx,
- forceMergeOpRead=true>;
+ forcePassthruRead=true>;
defm "" : VPseudoBinaryV_VI<simm5, m>,
- SchedUnary<"WriteVIALUI", "ReadVIALUV", mx, forceMergeOpRead=true>;
+ SchedUnary<"WriteVIALUI", "ReadVIALUV", mx, forcePassthruRead=true>;
}
}
@@ -2733,10 +2733,10 @@ multiclass VPseudoVSALU_VV_VX {
defvar mx = m.MX;
defm "" : VPseudoBinaryV_VV<m>,
SchedBinary<"WriteVSALUV", "ReadVSALUV", "ReadVSALUV", mx,
- forceMergeOpRead=true>;
+ forcePassthruRead=true>;
defm "" : VPseudoBinaryV_VX<m>,
SchedBinary<"WriteVSALUX", "ReadVSALUV", "ReadVSALUX", mx,
- forceMergeOpRead=true>;
+ forcePassthruRead=true>;
}
}
@@ -2745,10 +2745,10 @@ multiclass VPseudoVSMUL_VV_VX_RM {
defvar mx = m.MX;
defm "" : VPseudoBinaryV_VV_RM<m, Commutable=1>,
SchedBinary<"WriteVSMulV", "ReadVSMulV", "ReadVSMulV", mx,
- forceMergeOpRead=true>;
+ forcePassthruRead=true>;
defm "" : VPseudoBinaryV_VX_RM<m>,
SchedBinary<"WriteVSMulX", "ReadVSMulV", "ReadVSMulX", mx,
- forceMergeOpRead=true>;
+ forcePassthruRead=true>;
}
}
@@ -2757,10 +2757,10 @@ multiclass VPseudoVAALU_VV_VX_RM<bit Commutable = 0> {
defvar mx = m.MX;
defm "" : VPseudoBinaryV_VV_RM<m, Commutable=Commutable>,
SchedBinary<"WriteVAALUV", "ReadVAALUV", "ReadVAALUV", mx,
- forceMergeOpRead=true>;
+ forcePassthruRead=true>;
defm "" : VPseudoBinaryV_VX_RM<m>,
SchedBinary<"WriteVAALUX", "ReadVAALUV", "ReadVAALUX", mx,
- forceMergeOpRead=true>;
+ forcePassthruRead=true>;
}
}
@@ -2802,14 +2802,14 @@ multiclass VPseudoVFMUL_VV_VF_RM {
foreach e = SchedSEWSet<m.MX, isF=1>.val in
defm "" : VPseudoBinaryFV_VV_RM<m, e>,
SchedBinary<"WriteVFMulV", "ReadVFMulV", "ReadVFMulV", m.MX, e,
- forceMergeOpRead=true>;
+ forcePassthruRead=true>;
}
foreach f = FPList in {
foreach m = f.MxList in {
defm "" : VPseudoBinaryV_VF_RM<m, f, f.SEW>,
SchedBinary<"WriteVFMulF", "ReadVFMulV", "ReadVFMulF", m.MX,
- f.SEW, forceMergeOpRead=true>;
+ f.SEW, forcePassthruRead=true>;
}
}
}
@@ -2821,7 +2821,7 @@ multiclass VPseudoVFDIV_VV_VF_RM {
foreach e = sews in {
defm "" : VPseudoBinaryFV_VV_RM<m, e>,
SchedBinary<"WriteVFDivV", "ReadVFDivV", "ReadVFDivV", mx, e,
- forceMergeOpRead=true>;
+ forcePassthruRead=true>;
}
}
@@ -2829,7 +2829,7 @@ multiclass VPseudoVFDIV_VV_VF_RM {
foreach m = f.MxList in {
defm "" : VPseudoBinaryV_VF_RM<m, f, f.SEW>,
SchedBinary<"WriteVFDivF", "ReadVFDivV", "ReadVFDivF", m.MX, f.SEW,
- forceMergeOpRead=true>;
+ forcePassthruRead=true>;
}
}
}
@@ -2839,7 +2839,7 @@ multiclass VPseudoVFRDIV_VF_RM {
foreach m = f.MxList in {
defm "" : VPseudoBinaryV_VF_RM<m, f, f.SEW>,
SchedBinary<"WriteVFDivF", "ReadVFDivV", "ReadVFDivF", m.MX, f.SEW,
- forceMergeOpRead=true>;
+ forcePassthruRead=true>;
}
}
}
@@ -2848,10 +2848,10 @@ multiclass VPseudoVALU_VV_VX {
foreach m = MxList in {
defm "" : VPseudoBinaryV_VV<m>,
SchedBinary<"WriteVIALUV", "ReadVIALUV", "ReadVIALUV", m.MX,
- forceMergeOpRead=true>;
+ forcePassthruRead=true>;
defm "" : VPseudoBinaryV_VX<m>,
SchedBinary<"WriteVIALUX", "ReadVIALUV", "ReadVIALUX", m.MX,
- forceMergeOpRead=true>;
+ forcePassthruRead=true>;
}
}
@@ -2860,14 +2860,14 @@ multiclass VPseudoVSGNJ_VV_VF {
foreach e = SchedSEWSet<m.MX, isF=1>.val in
defm "" : VPseudoBinaryV_VV<m, sew=e>,
SchedBinary<"WriteVFSgnjV", "ReadVFSgnjV", "ReadVFSgnjV", m.MX,
- e, forceMergeOpRead=true>;
+ e, forcePassthruRead=true>;
}
foreach f = FPList in {
foreach m = f.MxList in {
defm "" : VPseudoBinaryV_VF<m, f, sew=f.SEW>,
SchedBinary<"WriteVFSgnjF", "ReadVFSgnjV", "ReadVFSgnjF", m.MX,
- f.SEW, forceMergeOpRead=true>;
+ f.SEW, forcePassthruRead=true>;
}
}
}
@@ -2877,14 +2877,14 @@ multiclass VPseudoVMAX_VV_VF {
foreach e = SchedSEWSet<m.MX, isF=1>.val in
defm "" : VPseudoBinaryV_VV<m, sew=e>,
SchedBinary<"WriteVFMinMaxV", "ReadVFMinMaxV", "ReadVFMinMaxV",
- m.MX, e, forceMergeOpRead=true>;
+ m.MX, e, forcePassthruRead=true>;
}
foreach f = FPList in {
foreach m = f.MxList in {
defm "" : VPseudoBinaryV_VF<m, f, sew=f.SEW>,
SchedBinary<"WriteVFMinMaxF", "ReadVFMinMaxV", "ReadVFMinMaxF",
- m.MX, f.SEW, forceMergeOpRead=true>;
+ m.MX, f.SEW, forcePassthruRead=true>;
}
}
}
@@ -2894,14 +2894,14 @@ multiclass VPseudoVALU_VV_VF_RM {
foreach e = SchedSEWSet<m.MX, isF=1>.val in
defm "" : VPseudoBinaryFV_VV_RM<m, e>,
SchedBinary<"WriteVFALUV", "ReadVFALUV", "ReadVFALUV", m.MX, e,
- forceMergeOpRead=true>;
+ forcePassthruRead=true>;
}
foreach f = FPList in {
foreach m = f.MxList in {
defm "" : VPseudoBinaryV_VF_RM<m, f, f.SEW>,
SchedBinary<"WriteVFALUF", "ReadVFALUV", "ReadVFALUF", m.MX,
- f.SEW, forceMergeOpRead=true>;
+ f.SEW, forcePassthruRead=true>;
}
}
}
@@ -2911,7 +2911,7 @@ multiclass VPseudoVALU_VF_RM {
foreach m = f.MxList in {
defm "" : VPseudoBinaryV_VF_RM<m, f, f.SEW>,
SchedBinary<"WriteVFALUF", "ReadVFALUV", "ReadVFALUF", m.MX,
- f.SEW, forceMergeOpRead=true>;
+ f.SEW, forcePassthruRead=true>;
}
}
}
@@ -2921,9 +2921,9 @@ multiclass VPseudoVALU_VX_VI {
defvar mx = m.MX;
defm "" : VPseudoBinaryV_VX<m>,
SchedBinary<"WriteVIALUX", "ReadVIALUV", "ReadVIALUX", mx,
- forceMergeOpRead=true>;
+ forcePassthruRead=true>;
defm "" : VPseudoBinaryV_VI<simm5, m>,
- SchedUnary<"WriteVIALUI", "ReadVIALUV", mx, forceMergeOpRead=true>;
+ SchedUnary<"WriteVIALUI", "ReadVIALUV", mx, forcePassthruRead=true>;
}
}
@@ -2932,10 +2932,10 @@ multiclass VPseudoVWALU_VV_VX<bit Commutable = 0> {
defvar mx = m.MX;
defm "" : VPseudoBinaryW_VV<m, Commutable=Commutable>,
SchedBinary<"WriteVIWALUV", "ReadVIWALUV", "ReadVIWALUV", mx,
- forceMergeOpRead=true>;
+ forcePassthruRead=true>;
defm "" : VPseudoBinaryW_VX<m>,
SchedBinary<"WriteVIWALUX", "ReadVIWALUV", "ReadVIWALUX", mx,
- forceMergeOpRead=true>;
+ forcePassthruRead=true>;
}
}
@@ -2944,10 +2944,10 @@ multiclass VPseudoVWMUL_VV_VX<bit Commutable = 0> {
defvar mx = m.MX;
defm "" : VPseudoBinaryW_VV<m, Commutable=Commutable>,
SchedBinary<"WriteVIWMulV", "ReadVIWMulV", "ReadVIWMulV", mx,
- forceMergeOpRead=true>;
+ forcePassthruRead=true>;
defm "" : VPseudoBinaryW_VX<m>,
SchedBinary<"WriteVIWMulX", "ReadVIWMulV", "ReadVIWMulX", mx,
- forceMergeOpRead=true>;
+ forcePassthruRead=true>;
}
}
@@ -2956,14 +2956,14 @@ multiclass VPseudoVWMUL_VV_VF_RM {
foreach e = SchedSEWSet<m.MX, isF=1, isWidening=1>.val in
defm "" : VPseudoBinaryW_VV_RM<m, sew=e>,
SchedBinary<"WriteVFWMulV", "ReadVFWMulV", "ReadVFWMulV", m.MX,
- e, forceMergeOpRead=true>;
+ e, forcePassthruRead=true>;
}
foreach f = FPListW in {
foreach m = f.MxListFW in {
defm "" : VPseudoBinaryW_VF_RM<m, f, sew=f.SEW>,
SchedBinary<"WriteVFWMulF", "ReadVFWMulV", "ReadVFWMulF", m.MX,
- f.SEW, forceMergeOpRead=true>;
+ f.SEW, forcePassthruRead=true>;
}
}
}
@@ -2973,10 +2973,10 @@ multiclass VPseudoVWALU_WV_WX {
defvar mx = m.MX;
defm "" : VPseudoBinaryW_WV<m>,
SchedBinary<"WriteVIWALUV", "ReadVIWALUV", "ReadVIWALUV", mx,
- forceMergeOpRead=true>;
+ forcePassthruRead=true>;
defm "" : VPseudoBinaryW_WX<m>,
SchedBinary<"WriteVIWALUX", "ReadVIWALUV", "ReadVIWALUX", mx,
- forceMergeOpRead=true>;
+ forcePassthruRead=true>;
}
}
@@ -2985,14 +2985,14 @@ multiclass VPseudoVFWALU_VV_VF_RM {
foreach e = SchedSEWSet<m.MX, isF=1, isWidening=1>.val in
defm "" : VPseudoBinaryW_VV_RM<m, sew=e>,
SchedBinary<"WriteVFWALUV", "ReadVFWALUV", "ReadVFWALUV", m.MX,
- e, forceMergeOpRead=true>;
+ e, forcePassthruRead=true>;
}
foreach f = FPListW in {
foreach m = f.MxListFW in {
defm "" : VPseudoBinaryW_VF_RM<m, f, sew=f.SEW>,
SchedBinary<"WriteVFWALUF", "ReadVFWALUV", "ReadVFWALUF", m.MX,
- f.SEW, forceMergeOpRead=true>;
+ f.SEW, forcePassthruRead=true>;
}
}
}
@@ -3002,13 +3002,13 @@ multiclass VPseudoVFWALU_WV_WF_RM {
foreach e = SchedSEWSet<m.MX, isF=1, isWidening=1>.val in
defm "" : VPseudoBinaryW_WV_RM<m, sew=e>,
SchedBinary<"WriteVFWALUV", "ReadVFWALUV", "ReadVFWALUV", m.MX,
- e, forceMergeOpRead=true>;
+ e, forcePassthruRead=true>;
}
foreach f = FPListW in {
foreach m = f.MxListFW in {
defm "" : VPseudoBinaryW_WF_RM<m, f, sew=f.SEW>,
SchedBinary<"WriteVFWALUF", "ReadVFWALUV", "ReadVFWALUF", m.MX,
- f.SEW, forceMergeOpRead=true>;
+ f.SEW, forcePassthruRead=true>;
}
}
}
@@ -3020,17 +3020,17 @@ multiclass VPseudoVMRG_VM_XM_IM {
VPseudoTiedBinaryCarryIn<GetVRegNoV0<m.vrclass>.R,
m.vrclass, m.vrclass, m>,
SchedBinary<"WriteVIMergeV", "ReadVIMergeV", "ReadVIMergeV", mx,
- forceMergeOpRead=true>;
+ forcePassthruRead=true>;
def "_VXM" # "_" # m.MX:
VPseudoTiedBinaryCarryIn<GetVRegNoV0<m.vrclass>.R,
m.vrclass, GPR, m>,
SchedBinary<"WriteVIMergeX", "ReadVIMergeV", "ReadVIMergeX", mx,
- forceMergeOpRead=true>;
+ forcePassthruRead=true>;
def "_VIM" # "_" # m.MX:
VPseudoTiedBinaryCarryIn<GetVRegNoV0<m.vrclass>.R,
m.vrclass, simm5, m>,
SchedUnary<"WriteVIMergeI", "ReadVIMergeV", mx,
- forceMergeOpRead=true>;
+ forcePassthruRead=true>;
}
}
@@ -3039,13 +3039,13 @@ multiclass VPseudoVCALU_VM_XM_IM {
defvar mx = m.MX;
defm "" : VPseudoTiedBinaryV_VM<m, Commutable=1>,
SchedBinary<"WriteVICALUV", "ReadVICALUV", "ReadVICALUV", mx,
- forceMergeOpRead=true>;
+ forcePassthruRead=true>;
defm "" : VPseudoTiedBinaryV_XM<m>,
SchedBinary<"WriteVICALUX", "ReadVICALUV", "ReadVICALUX", mx,
- forceMergeOpRead=true>;
+ forcePassthruRead=true>;
defm "" : VPseudoTiedBinaryV_IM<m>,
SchedUnary<"WriteVICALUI", "ReadVICALUV", mx,
- forceMergeOpRead=true>;
+ forcePassthruRead=true>;
}
}
@@ -3054,10 +3054,10 @@ multiclass VPseudoVCALU_VM_XM {
defvar mx = m.MX;
defm "" : VPseudoTiedBinaryV_VM<m>,
SchedBinary<"WriteVICALUV", "ReadVICALUV", "ReadVICALUV", mx,
- forceMergeOpRead=true>;
+ forcePassthruRead=true>;
defm "" : VPseudoTiedBinaryV_XM<m>,
SchedBinary<"WriteVICALUX", "ReadVICALUV", "ReadVICALUX", mx,
- forceMergeOpRead=true>;
+ forcePassthruRead=true>;
}
}
@@ -3068,13 +3068,13 @@ multiclass VPseudoVCALUM_VM_XM_IM {
defm "" : VPseudoBinaryV_VM<m, CarryOut=1, CarryIn=1, Constraint=constraint,
Commutable=1, TargetConstraintType=2>,
SchedBinary<"WriteVICALUV", "ReadVICALUV", "ReadVICALUV", mx, forceMasked=1,
- forceMergeOpRead=true>;
+ forcePassthruRead=true>;
defm "" : VPseudoBinaryV_XM<m, CarryOut=1, CarryIn=1, Constraint=constraint, TargetConstraintType=2>,
SchedBinary<"WriteVICALUX", "ReadVICALUV", "ReadVICALUX", mx, forceMasked=1,
- forceMergeOpRead=true>;
+ forcePassthruRead=true>;
defm "" : VPseudoBinaryV_IM<m, CarryOut=1, CarryIn=1, Constraint=constraint, TargetConstraintType=2>,
SchedUnary<"WriteVICALUI", "ReadVICALUV", mx, forceMasked=1,
- forceMergeOpRead=true>;
+ forcePassthruRead=true>;
}
}
@@ -3085,11 +3085,11 @@ multiclass VPseudoVCALUM_VM_XM {
defm "" : VPseudoBinaryV_VM<m, CarryOut=1, CarryIn=1, Constraint=constraint,
TargetConstraintType=2>,
SchedBinary<"WriteVICALUV", "ReadVICALUV", "ReadVICALUV", mx, forceMasked=1,
- forceMergeOpRead=true>;
+ forcePassthruRead=true>;
defm "" : VPseudoBinaryV_XM<m, CarryOut=1, CarryIn=1, Constraint=constraint,
TargetConstraintType=2>,
SchedBinary<"WriteVICALUX", "ReadVICALUV", "ReadVICALUX", mx, forceMasked=1,
- forceMergeOpRead=true>;
+ forcePassthruRead=true>;
}
}
@@ -3100,13 +3100,13 @@ multiclass VPseudoVCALUM_V_X_I {
defm "" : VPseudoBinaryV_VM<m, CarryOut=1, CarryIn=0, Constraint=constraint,
Commutable=1, TargetConstraintType=2>,
SchedBinary<"WriteVICALUV", "ReadVICALUV", "ReadVICALUV", mx,
- forceMergeOpRead=true>;
+ forcePassthruRead=true>;
defm "" : VPseudoBinaryV_XM<m, CarryOut=1, CarryIn=0, Constraint=constraint, TargetConstraintType=2>,
SchedBinary<"WriteVICALUX", "ReadVICALUV", "ReadVICALUX", mx,
- forceMergeOpRead=true>;
+ forcePassthruRead=true>;
defm "" : VPseudoBinaryV_IM<m, CarryOut=1, CarryIn=0, Constraint=constraint>,
SchedUnary<"WriteVICALUI", "ReadVICALUV", mx,
- forceMergeOpRead=true>;
+ forcePassthruRead=true>;
}
}
@@ -3116,10 +3116,10 @@ multiclass VPseudoVCALUM_V_X {
defvar mx = m.MX;
defm "" : VPseudoBinaryV_VM<m, CarryOut=1, CarryIn=0, Constraint=constraint, TargetConstraintType=2>,
SchedBinary<"WriteVICALUV", "ReadVICALUV", "ReadVICALUV", mx,
- forceMergeOpRead=true>;
+ forcePassthruRead=true>;
defm "" : VPseudoBinaryV_XM<m, CarryOut=1, CarryIn=0, Constraint=constraint, TargetConstraintType=2>,
SchedBinary<"WriteVICALUX", "ReadVICALUV", "ReadVICALUX", mx,
- forceMergeOpRead=true>;
+ forcePassthruRead=true>;
}
}
@@ -3128,13 +3128,13 @@ multiclass VPseudoVNCLP_WV_WX_WI_RM {
defvar mx = m.MX;
defm "" : VPseudoBinaryV_WV_RM<m>,
SchedBinary<"WriteVNClipV", "ReadVNClipV", "ReadVNClipV", mx,
- forceMergeOpRead=true>;
+ forcePassthruRead=true>;
defm "" : VPseudoBinaryV_WX_RM<m>,
SchedBinary<"WriteVNClipX", "ReadVNClipV", "ReadVNClipX", mx,
- forceMergeOpRead=true>;
+ forcePassthruRead=true>;
defm "" : VPseudoBinaryV_WI_RM<m>,
SchedUnary<"WriteVNClipI", "ReadVNClipV", mx,
- forceMergeOpRead=true>;
+ forcePassthruRead=true>;
}
}
@@ -3143,13 +3143,13 @@ multiclass VPseudoVNSHT_WV_WX_WI {
defvar mx = m.MX;
defm "" : VPseudoBinaryV_WV<m>,
SchedBinary<"WriteVNShiftV", "ReadVNShiftV", "ReadVNShiftV", mx,
- forceMergeOpRead=true>;
+ forcePassthruRead=true>;
defm "" : VPseudoBinaryV_WX<m>,
SchedBinary<"WriteVNShiftX", "ReadVNShiftV", "ReadVNShiftX", mx,
- forceMergeOpRead=true>;
+ forcePassthruRead=true>;
defm "" : VPseudoBinaryV_WI<m>,
SchedUnary<"WriteVNShiftI", "ReadVNShiftV", mx,
- forceMergeOpRead=true>;
+ forcePassthruRead=true>;
}
}
@@ -3591,7 +3591,7 @@ multiclass VPseudoVCVTI_V {
foreach m = MxListF in {
defm _V : VPseudoConversion<m.vrclass, m.vrclass, m>,
SchedUnary<"WriteVFCvtFToIV", "ReadVFCvtFToIV", m.MX,
- forceMergeOpRead=true>;
+ forcePassthruRead=true>;
}
}
@@ -3599,7 +3599,7 @@ multiclass VPseudoVCVTI_V_RM {
foreach m = MxListF in {
defm _V : VPseudoConversionRoundingMode<m.vrclass, m.vrclass, m>,
SchedUnary<"WriteVFCvtFToIV", "ReadVFCvtFToIV", m.MX,
- forceMergeOpRead=true>;
+ forcePassthruRead=true>;
}
}
@@ -3607,7 +3607,7 @@ multiclass VPseudoVCVTI_RM_V {
foreach m = MxListF in {
defm _V : VPseudoConversionRM<m.vrclass, m.vrclass, m>,
SchedUnary<"WriteVFCvtFToIV", "ReadVFCvtFToIV", m.MX,
- forceMergeOpRead=true>;
+ forcePassthruRead=true>;
}
}
@@ -3615,7 +3615,7 @@ multiclass VPseudoVFROUND_NOEXCEPT_V {
foreach m = MxListF in {
defm _V : VPseudoConversionNoExcept<m.vrclass, m.vrclass, m>,
SchedUnary<"WriteVFCvtFToIV", "ReadVFCvtFToIV", m.MX,
- forceMergeOpRead=true>;
+ forcePassthruRead=true>;
}
}
@@ -3624,7 +3624,7 @@ multiclass VPseudoVCVTF_V_RM {
foreach e = SchedSEWSet<m.MX, isF=1>.val in
defm _V : VPseudoConversionRoundingMode<m.vrclass, m.vrclass, m, sew=e>,
SchedUnary<"WriteVFCvtIToFV", "ReadVFCvtIToFV", m.MX, e,
- forceMergeOpRead=true>;
+ forcePassthruRead=true>;
}
}
@@ -3633,7 +3633,7 @@ multiclass VPseudoVCVTF_RM_V {
foreach e = SchedSEWSet<m.MX, isF=1>.val in
defm _V : VPseudoConversionRM<m.vrclass, m.vrclass, m, sew=e>,
SchedUnary<"WriteVFCvtIToFV", "ReadVFCvtIToFV", m.MX, e,
- forceMergeOpRead=true>;
+ forcePassthruRead=true>;
}
}
@@ -3642,7 +3642,7 @@ multiclass VPseudoVWCVTI_V {
foreach m = MxListFW in {
defm _V : VPseudoConversion<m.wvrclass, m.vrclass, m, constraint, TargetConstraintType=3>,
SchedUnary<"WriteVFWCvtFToIV", "ReadVFWCvtFToIV", m.MX,
- forceMergeOpRead=true>;
+ forcePassthruRead=true>;
}
}
@@ -3651,7 +3651,7 @@ multiclass VPseudoVWCVTI_V_RM {
foreach m = MxListFW in {
defm _V : VPseudoConversionRoundingMode<m.wvrclass, m.vrclass, m, constraint, TargetConstraintType=3>,
SchedUnary<"WriteVFWCvtFToIV", "ReadVFWCvtFToIV", m.MX,
- forceMergeOpRead=true>;
+ forcePassthruRead=true>;
}
}
@@ -3660,7 +3660,7 @@ multiclass VPseudoVWCVTI_RM_V {
foreach m = MxListFW in {
defm _V : VPseudoConversionRM<m.wvrclass, m.vrclass, m, constraint>,
SchedUnary<"WriteVFWCvtFToIV", "ReadVFWCvtFToIV", m.MX,
- forceMergeOpRead=true>;
+ forcePassthruRead=true>;
}
}
@@ -3671,7 +3671,7 @@ multiclass VPseudoVWCVTF_V {
defm _V : VPseudoConversion<m.wvrclass, m.vrclass, m, constraint, sew=e,
TargetConstraintType=3>,
SchedUnary<"WriteVFWCvtIToFV", "ReadVFWCvtIToFV", m.MX, e,
- forceMergeOpRead=true>;
+ forcePassthruRead=true>;
}
}
@@ -3682,7 +3682,7 @@ multiclass VPseudoVWCVTD_V {
defm _V : VPseudoConversion<m.wvrclass, m.vrclass, m, constraint, sew=e,
TargetConstraintType=3>,
SchedUnary<"WriteVFWCvtFToFV", "ReadVFWCvtFToFV", m.MX, e,
- forceMergeOpRead=true>;
+ forcePassthruRead=true>;
}
}
@@ -3691,7 +3691,7 @@ multiclass VPseudoVNCVTI_W {
foreach m = MxListW in {
defm _W : VPseudoConversion<m.vrclass, m.wvrclass, m, constraint, TargetConstraintType=2>,
SchedUnary<"WriteVFNCvtFToIV", "ReadVFNCvtFToIV", m.MX,
- forceMergeOpRead=true>;
+ forcePassthruRead=true>;
}
}
@@ -3700,7 +3700,7 @@ multiclass VPseudoVNCVTI_W_RM {
foreach m = MxListW in {
defm _W : VPseudoConversionRoundingMode<m.vrclass, m.wvrclass, m, constraint, TargetConstraintType=2>,
SchedUnary<"WriteVFNCvtFToIV", "ReadVFNCvtFToIV", m.MX,
- forceMergeOpRead=true>;
+ forcePassthruRead=true>;
}
}
@@ -3709,7 +3709,7 @@ multiclass VPseudoVNCVTI_RM_W {
foreach m = MxListW in {
defm _W : VPseudoConversionRM<m.vrclass, m.wvrclass, m, constraint, TargetConstraintType=2>,
SchedUnary<"WriteVFNCvtFToIV", "ReadVFNCvtFToIV", m.MX,
- forceMergeOpRead=true>;
+ forcePassthruRead=true>;
}
}
@@ -3721,7 +3721,7 @@ multiclass VPseudoVNCVTF_W_RM {
constraint, sew=e,
TargetConstraintType=2>,
SchedUnary<"WriteVFNCvtIToFV", "ReadVFNCvtIToFV", m.MX, e,
- forceMergeOpRead=true>;
+ forcePassthruRead=true>;
}
}
@@ -3731,7 +3731,7 @@ multiclass VPseudoVNCVTF_RM_W {
foreach e = SchedSEWSet<m.MX, isF=1, isWidening=1>.val in
defm _W : VPseudoConversionRM<m.vrclass, m.wvrclass, m, constraint, sew=e>,
SchedUnary<"WriteVFNCvtIToFV", "ReadVFNCvtIToFV", m.MX, e,
- forceMergeOpRead=true>;
+ forcePassthruRead=true>;
}
}
@@ -3742,7 +3742,7 @@ multiclass VPseudoVNCVTD_W {
defm _W : VPseudoConversion<m.vrclass, m.wvrclass, m, constraint, sew=e,
TargetConstraintType=2>,
SchedUnary<"WriteVFNCvtFToFV", "ReadVFNCvtFToFV", m.MX, e,
- forceMergeOpRead=true>;
+ forcePassthruRead=true>;
}
}
@@ -3754,7 +3754,7 @@ multiclass VPseudoVNCVTD_W_RM {
constraint, sew=e,
TargetConstraintType=2>,
SchedUnary<"WriteVFNCvtFToFV", "ReadVFNCvtFToFV", m.MX, e,
- forceMergeOpRead=true>;
+ forcePassthruRead=true>;
}
}
@@ -3922,14 +3922,14 @@ class VPatUnaryNoMask<string intrinsic_name,
VReg op2_reg_class,
bit isSEWAware = 0> :
Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
- (result_type result_reg_class:$merge),
+ (result_type result_reg_class:$pt),
(op2_type op2_reg_class:$rs2),
VLOpFrag)),
(!cast<Instruction>(
!if(isSEWAware,
inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, log2sew),
inst#"_"#kind#"_"#vlmul.MX))
- (result_type result_reg_class:$merge),
+ (result_type result_reg_class:$pt),
(op2_type op2_reg_class:$rs2),
GPR:$vl, log2sew, TU_MU)>;
@@ -3944,7 +3944,7 @@ class VPatUnaryNoMaskRoundingMode<string intrinsic_name,
VReg op2_reg_class,
bit isSEWAware = 0> :
Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
- (result_type result_reg_class:$merge),
+ (result_type result_reg_class:$pt),
(op2_type op2_reg_class:$rs2),
(XLenVT timm:$round),
VLOpFrag)),
@@ -3952,7 +3952,7 @@ class VPatUnaryNoMaskRoundingMode<string intrinsic_name,
!if(isSEWAware,
inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, log2sew),
inst#"_"#kind#"_"#vlmul.MX))
- (result_type result_reg_class:$merge),
+ (result_type result_reg_class:$pt),
(op2_type op2_reg_class:$rs2),
(XLenVT timm:$round),
GPR:$vl, log2sew, TU_MU)>;
@@ -3968,7 +3968,7 @@ class VPatUnaryNoMaskRTZ<string intrinsic_name,
VReg op2_reg_class,
bit isSEWAware = 0> :
Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
- (result_type result_reg_class:$merge),
+ (result_type result_reg_class:$pt),
(op2_type op2_reg_class:$rs2),
(XLenVT 0b001),
VLOpFrag)),
@@ -3976,7 +3976,7 @@ class VPatUnaryNoMaskRTZ<string intrinsic_name,
!if(isSEWAware,
inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, log2sew),
inst#"_"#kind#"_"#vlmul.MX))
- (result_type result_reg_class:$merge),
+ (result_type result_reg_class:$pt),
(op2_type op2_reg_class:$rs2),
GPR:$vl, log2sew, TU_MU)>;
@@ -3992,7 +3992,7 @@ class VPatUnaryMask<string intrinsic_name,
VReg op2_reg_class,
bit isSEWAware = 0> :
Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
- (result_type result_reg_class:$merge),
+ (result_type result_reg_class:$pt),
(op2_type op2_reg_class:$rs2),
(mask_type V0),
VLOpFrag, (XLenVT timm:$policy))),
@@ -4000,7 +4000,7 @@ class VPatUnaryMask<string intrinsic_name,
!if(isSEWAware,
inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK",
inst#"_"#kind#"_"#vlmul.MX#"_MASK"))
- (result_type result_reg_class:$merge),
+ (result_type result_reg_class:$pt),
(op2_type op2_reg_class:$rs2),
(mask_type V0), GPR:$vl, log2sew, (XLenVT timm:$policy))>;
@@ -4016,7 +4016,7 @@ class VPatUnaryMaskRoundingMode<string intrinsic_name,
VReg op2_reg_class,
bit isSEWAware = 0> :
Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
- (result_type result_reg_class:$merge),
+ (result_type result_reg_class:$pt),
(op2_type op2_reg_class:$rs2),
(mask_type V0),
(XLenVT timm:$round),
@@ -4025,7 +4025,7 @@ class VPatUnaryMaskRoundingMode<string intrinsic_name,
!if(isSEWAware,
inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK",
inst#"_"#kind#"_"#vlmul.MX#"_MASK"))
- (result_type result_reg_class:$merge),
+ (result_type result_reg_class:$pt),
(op2_type op2_reg_class:$rs2),
(mask_type V0),
(XLenVT timm:$round),
@@ -4043,7 +4043,7 @@ class VPatUnaryMaskRTZ<string intrinsic_name,
VReg op2_reg_class,
bit isSEWAware = 0> :
Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
- (result_type result_reg_class:$merge),
+ (result_type result_reg_class:$pt),
(op2_type op2_reg_class:$rs2),
(mask_type V0),
(XLenVT 0b001),
@@ -4052,7 +4052,7 @@ class VPatUnaryMaskRTZ<string intrinsic_name,
!if(isSEWAware,
inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK",
inst#"_"#kind#"_"#vlmul.MX#"_MASK"))
- (result_type result_reg_class:$merge),
+ (result_type result_reg_class:$pt),
(op2_type op2_reg_class:$rs2),
(mask_type V0),
GPR:$vl, log2sew, (XLenVT timm:$policy))>;
@@ -4071,12 +4071,12 @@ class VPatMaskUnaryMask<string intrinsic_name,
string inst,
MTypeInfo mti> :
Pat<(mti.Mask (!cast<Intrinsic>(intrinsic_name#"_mask")
- (mti.Mask VR:$merge),
+ (mti.Mask VR:$pt),
(mti.Mask VR:$rs2),
(mti.Mask V0),
VLOpFrag)),
(!cast<Instruction>(inst#"_M_"#mti.BX#"_MASK")
- (mti.Mask VR:$merge),
+ (mti.Mask VR:$pt),
(mti.Mask VR:$rs2),
(mti.Mask V0), GPR:$vl, mti.Log2SEW, TU_MU)>;
@@ -4091,12 +4091,12 @@ class VPatUnaryAnyMask<string intrinsic,
VReg result_reg_class,
VReg op1_reg_class> :
Pat<(result_type (!cast<Intrinsic>(intrinsic)
- (result_type result_reg_class:$merge),
+ (result_type result_reg_class:$pt),
(op1_type op1_reg_class:$rs1),
(mask_type VR:$rs2),
VLOpFrag)),
(!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, log2sew))
- (result_type result_reg_class:$merge),
+ (result_type result_reg_class:$pt),
(op1_type op1_reg_class:$rs1),
(mask_type VR:$rs2),
GPR:$vl, log2sew)>;
@@ -4128,12 +4128,12 @@ class VPatBinaryNoMaskTU<string intrinsic_name,
VReg op1_reg_class,
DAGOperand op2_kind> :
Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
- (result_type result_reg_class:$merge),
+ (result_type result_reg_class:$pt),
(op1_type op1_reg_class:$rs1),
(op2_type op2_kind:$rs2),
VLOpFrag)),
(!cast<Instruction>(inst)
- (result_type result_reg_class:$merge),
+ (result_type result_reg_class:$pt),
(op1_type op1_reg_class:$rs1),
(op2_type op2_kind:$rs2),
GPR:$vl, sew, TU_MU)>;
@@ -4148,13 +4148,13 @@ class VPatBinaryNoMaskTURoundingMode<string intrinsic_name,
VReg op1_reg_class,
DAGOperand op2_kind> :
Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
- (result_type result_reg_class:$merge),
+ (result_type result_reg_class:$pt),
(op1_type op1_reg_class:$rs1),
(op2_type op2_kind:$rs2),
(XLenVT timm:$round),
VLOpFrag)),
(!cast<Instruction>(inst)
- (result_type result_reg_class:$merge),
+ (result_type result_reg_class:$pt),
(op1_type op1_reg_class:$rs1),
(op2_type op2_kind:$rs2),
(XLenVT timm:$round),
@@ -4190,13 +4190,13 @@ class VPatBinaryMask<string intrinsic_name,
VReg op1_reg_class,
DAGOperand op2_kind> :
Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
- (result_type result_reg_class:$merge),
+ (result_type result_reg_class:$pt),
(op1_type op1_reg_class:$rs1),
(op2_type op2_kind:$rs2),
(mask_type V0),
VLOpFrag)),
(!cast<Instruction>(inst#"_MASK")
- (result_type result_reg_class:$merge),
+ (result_type result_reg_class:$pt),
(op1_type op1_reg_class:$rs1),
(op2_type op2_kind:$rs2),
(mask_type V0), GPR:$vl, sew)>;
@@ -4212,13 +4212,13 @@ class VPatBinaryMaskPolicy<string intrinsic_name,
VReg op1_reg_class,
DAGOperand op2_kind> :
Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
- (result_type result_reg_class:$merge),
+ (result_type result_reg_class:$pt),
(op1_type op1_reg_class:$rs1),
(op2_type op2_kind:$rs2),
(mask_type V0),
VLOpFrag, (XLenVT timm:$policy))),
(!cast<Instruction>(inst#"_MASK")
- (result_type result_reg_class:$merge),
+ (result_type result_reg_class:$pt),
(op1_type op1_reg_class:$rs1),
(op2_type op2_kind:$rs2),
(mask_type V0), GPR:$vl, sew, (XLenVT timm:$policy))>;
@@ -4234,14 +4234,14 @@ class VPatBinaryMaskPolicyRoundingMode<string intrinsic_name,
VReg op1_reg_class,
DAGOperand op2_kind> :
Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
- (result_type result_reg_class:$merge),
+ (result_type result_reg_class:$pt),
(op1_type op1_reg_class:$rs1),
(op2_type op2_kind:$rs2),
(mask_type V0),
(XLenVT timm:$round),
VLOpFrag, (XLenVT timm:$policy))),
(!cast<Instruction>(inst#"_MASK")
- (result_type result_reg_class:$merge),
+ (result_type result_reg_class:$pt),
(op1_type op1_reg_class:$rs1),
(op2_type op2_kind:$rs2),
(mask_type V0),
@@ -4260,13 +4260,13 @@ class VPatBinaryMaskSwapped<string intrinsic_name,
VReg op1_reg_class,
DAGOperand op2_kind> :
Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
- (result_type result_reg_class:$merge),
+ (result_type result_reg_class:$pt),
(op2_type op2_kind:$rs2),
(op1_type op1_reg_class:$rs1),
(mask_type V0),
VLOpFrag)),
(!cast<Instruction>(inst#"_MASK")
- (result_type result_reg_class:$merge),
+ (result_type result_reg_class:$pt),
(op1_type op1_reg_class:$rs1),
(op2_type op2_kind:$rs2),
(mask_type V0), GPR:$vl, sew)>;
@@ -4315,12 +4315,12 @@ class VPatTiedBinaryNoMaskTU<string intrinsic_name,
VReg result_reg_class,
DAGOperand op2_kind> :
Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
- (result_type result_reg_class:$merge),
- (result_type result_reg_class:$merge),
+ (result_type result_reg_class:$pt),
+ (result_type result_reg_class:$pt),
(op2_type op2_kind:$rs2),
VLOpFrag)),
(!cast<Instruction>(inst#"_TIED")
- (result_type result_reg_class:$merge),
+ (result_type result_reg_class:$pt),
(op2_type op2_kind:$rs2),
GPR:$vl, sew, TU_MU)>;
@@ -4332,13 +4332,13 @@ class VPatTiedBinaryNoMaskTURoundingMode<string intrinsic_name,
VReg result_reg_class,
DAGOperand op2_kind> :
Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
- (result_type result_reg_class:$merge),
- (result_type result_reg_class:$merge),
+ (result_type result_reg_class:$pt),
+ (result_type result_reg_class:$pt),
(op2_type op2_kind:$rs2),
(XLenVT timm:$round),
VLOpFrag)),
(!cast<Instruction>(inst#"_TIED")
- (result_type result_reg_class:$merge),
+ (result_type result_reg_class:$pt),
(op2_type op2_kind:$rs2),
(XLenVT timm:$round),
GPR:$vl, sew, TU_MU)>;
@@ -4352,13 +4352,13 @@ class VPatTiedBinaryMask<string intrinsic_name,
VReg result_reg_class,
DAGOperand op2_kind> :
Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
- (result_type result_reg_class:$merge),
- (result_type result_reg_class:$merge),
+ (result_type result_reg_class:$pt),
+ (result_type result_reg_class:$pt),
(op2_type op2_kind:$rs2),
(mask_type V0),
VLOpFrag, (XLenVT timm:$policy))),
(!cast<Instruction>(inst#"_MASK_TIED")
- (result_type result_reg_class:$merge),
+ (result_type result_reg_class:$pt),
(op2_type op2_kind:$rs2),
(mask_type V0), GPR:$vl, sew, (XLenVT timm:$policy))>;
@@ -4371,14 +4371,14 @@ class VPatTiedBinaryMaskRoundingMode<string intrinsic_name,
VReg result_reg_class,
DAGOperand op2_kind> :
Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
- (result_type result_reg_class:$merge),
- (result_type result_reg_class:$merge),
+ (result_type result_reg_class:$pt),
+ (result_type result_reg_class:$pt),
(op2_type op2_kind:$rs2),
(mask_type V0),
(XLenVT timm:$round),
VLOpFrag, (XLenVT timm:$policy))),
(!cast<Instruction>(inst#"_MASK_TIED")
- (result_type result_reg_class:$merge),
+ (result_type result_reg_class:$pt),
(op2_type op2_kind:$rs2),
(mask_type V0),
(XLenVT timm:$round),
@@ -4678,15 +4678,15 @@ multiclass VPatNullaryV<string intrinsic, string instruction> {
foreach vti = AllIntegerVectors in {
let Predicates = GetVTypePredicates<vti>.Predicates in {
def : Pat<(vti.Vector (!cast<Intrinsic>(intrinsic)
- (vti.Vector vti.RegClass:$merge),
+ (vti.Vector vti.RegClass:$pt),
VLOpFrag)),
(!cast<Instruction>(instruction#"_V_" # vti.LMul.MX)
- vti.RegClass:$merge, GPR:$vl, vti.Log2SEW, TU_MU)>;
+ vti.RegClass:$pt, GPR:$vl, vti.Log2SEW, TU_MU)>;
def : Pat<(vti.Vector (!cast<Intrinsic>(intrinsic # "_mask")
- (vti.Vector vti.RegClass:$merge),
+ (vti.Vector vti.RegClass:$pt),
(vti.Mask V0), VLOpFrag, (XLenVT timm:$policy))),
(!cast<Instruction>(instruction#"_V_" # vti.LMul.MX # "_MASK")
- vti.RegClass:$merge, (vti.Mask V0),
+ vti.RegClass:$pt, (vti.Mask V0),
GPR:$vl, vti.Log2SEW, (XLenVT timm:$policy))>;
}
}
@@ -4781,13 +4781,13 @@ multiclass VPatBinaryCarryInTAIL<string intrinsic,
VReg op1_reg_class,
DAGOperand op2_kind> {
def : Pat<(result_type (!cast<Intrinsic>(intrinsic)
- (result_type result_reg_class:$merge),
+ (result_type result_reg_class:$pt),
(op1_type op1_reg_class:$rs1),
(op2_type op2_kind:$rs2),
(mask_type V0),
VLOpFrag)),
(!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
- (result_type result_reg_class:$merge),
+ (result_type result_reg_class:$pt),
(op1_type op1_reg_class:$rs1),
(op2_type op2_kind:$rs2),
(mask_type V0), GPR:$vl, sew)>;
@@ -6065,12 +6065,12 @@ multiclass VPatCompare_VI<string intrinsic, string inst,
defvar IntrMask = !cast<Intrinsic>(intrinsic # "_mask");
defvar PseudoMask = !cast<Instruction>(inst#"_VI_"#vti.LMul.MX#"_MASK");
let Predicates = GetVTypePredicates<vti>.Predicates in
- def : Pat<(vti.Mask (IntrMask (vti.Mask VR:$merge),
+ def : Pat<(vti.Mask (IntrMask (vti.Mask VR:$pt),
(vti.Vector vti.RegClass:$rs1),
(vti.Scalar ImmType:$rs2),
(vti.Mask V0),
VLOpFrag)),
- (PseudoMask VR:$merge, vti.RegClass:$rs1, (DecImm ImmType:$rs2),
+ (PseudoMask VR:$pt, vti.RegClass:$rs1, (DecImm ImmType:$rs2),
(vti.Mask V0), GPR:$vl, vti.Log2SEW)>;
}
}
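(For context, VPatCompare_VI appears to be the helper behind comparisons that
have no native immediate encoding: the pattern keeps the mask passthru and
rewrites the immediate with DecImm. A rough sketch of the idea, with
arbitrary register choices:

    # signed x >= 5 has no vmsge.vi encoding, so it is selected as
    # x > 4, i.e. the same comparison with the immediate decremented:
    vmsgt.vi v0, v8, 4
)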
@@ -6215,24 +6215,24 @@ foreach vti = AllIntegerVectors in {
// to use a more complex splat sequence. Add the pattern for all VTs for
// consistency.
let Predicates = GetVTypePredicates<vti>.Predicates in {
- def : Pat<(vti.Vector (int_riscv_vrsub (vti.Vector vti.RegClass:$merge),
+ def : Pat<(vti.Vector (int_riscv_vrsub (vti.Vector vti.RegClass:$pt),
(vti.Vector vti.RegClass:$rs2),
(vti.Vector vti.RegClass:$rs1),
VLOpFrag)),
(!cast<Instruction>("PseudoVSUB_VV_"#vti.LMul.MX)
- vti.RegClass:$merge,
+ vti.RegClass:$pt,
vti.RegClass:$rs1,
vti.RegClass:$rs2,
GPR:$vl,
vti.Log2SEW, TU_MU)>;
- def : Pat<(vti.Vector (int_riscv_vrsub_mask (vti.Vector vti.RegClass:$merge),
+ def : Pat<(vti.Vector (int_riscv_vrsub_mask (vti.Vector vti.RegClass:$pt),
(vti.Vector vti.RegClass:$rs2),
(vti.Vector vti.RegClass:$rs1),
(vti.Mask V0),
VLOpFrag,
(XLenVT timm:$policy))),
(!cast<Instruction>("PseudoVSUB_VV_"#vti.LMul.MX#"_MASK")
- vti.RegClass:$merge,
+ vti.RegClass:$pt,
vti.RegClass:$rs1,
vti.RegClass:$rs2,
(vti.Mask V0),
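(For context, there is no vrsub.vv encoding; a reverse subtract of two
vectors is just an ordinary subtract with its sources exchanged, roughly
(register choices arbitrary):

    # rsub(a, b) = b - a, so with both operands in registers:
    vsub.vv v8, v12, v10    # rather than a nonexistent vrsub.vv v8, v10, v12
)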
@@ -6241,24 +6241,24 @@ foreach vti = AllIntegerVectors in {
(XLenVT timm:$policy))>;
// Match VSUB with a small immediate to vadd.vi by negating the immediate.
- def : Pat<(vti.Vector (int_riscv_vsub (vti.Vector vti.RegClass:$merge),
+ def : Pat<(vti.Vector (int_riscv_vsub (vti.Vector vti.RegClass:$pt),
(vti.Vector vti.RegClass:$rs1),
(vti.Scalar simm5_plus1:$rs2),
VLOpFrag)),
(!cast<Instruction>("PseudoVADD_VI_"#vti.LMul.MX)
- vti.RegClass:$merge,
+ vti.RegClass:$pt,
vti.RegClass:$rs1,
(NegImm simm5_plus1:$rs2),
GPR:$vl,
vti.Log2SEW, TU_MU)>;
- def : Pat<(vti.Vector (int_riscv_vsub_mask (vti.Vector vti.RegClass:$merge),
+ def : Pat<(vti.Vector (int_riscv_vsub_mask (vti.Vector vti.RegClass:$pt),
(vti.Vector vti.RegClass:$rs1),
(vti.Scalar simm5_plus1:$rs2),
(vti.Mask V0),
VLOpFrag,
(XLenVT timm:$policy))),
(!cast<Instruction>("PseudoVADD_VI_"#vti.LMul.MX#"_MASK")
- vti.RegClass:$merge,
+ vti.RegClass:$pt,
vti.RegClass:$rs1,
(NegImm simm5_plus1:$rs2),
(vti.Mask V0),
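(Illustrating the comment above, with arbitrary registers: vsub has no .vi
form, so subtracting a small constant becomes adding its negation, which is
why the immediate is restricted to values whose negation still fits in simm5:

    vadd.vi v8, v8, -3      # selected for "subtract 3"
)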
@@ -6907,20 +6907,20 @@ defm : VPatBinaryV_VV_VX_VI<"int_riscv_vsra", "PseudoVSRA", AllIntegerVectors,
foreach vti = AllIntegerVectors in {
// Emit shift by 1 as an add since it might be faster.
let Predicates = GetVTypePredicates<vti>.Predicates in {
- def : Pat<(vti.Vector (int_riscv_vsll (vti.Vector vti.RegClass:$merge),
+ def : Pat<(vti.Vector (int_riscv_vsll (vti.Vector vti.RegClass:$pt),
(vti.Vector vti.RegClass:$rs1),
(XLenVT 1), VLOpFrag)),
(!cast<Instruction>("PseudoVADD_VV_"#vti.LMul.MX)
- vti.RegClass:$merge, vti.RegClass:$rs1,
+ vti.RegClass:$pt, vti.RegClass:$rs1,
vti.RegClass:$rs1, GPR:$vl, vti.Log2SEW, TU_MU)>;
- def : Pat<(vti.Vector (int_riscv_vsll_mask (vti.Vector vti.RegClass:$merge),
+ def : Pat<(vti.Vector (int_riscv_vsll_mask (vti.Vector vti.RegClass:$pt),
(vti.Vector vti.RegClass:$rs1),
(XLenVT 1),
(vti.Mask V0),
VLOpFrag,
(XLenVT timm:$policy))),
(!cast<Instruction>("PseudoVADD_VV_"#vti.LMul.MX#"_MASK")
- vti.RegClass:$merge,
+ vti.RegClass:$pt,
vti.RegClass:$rs1,
vti.RegClass:$rs1,
(vti.Mask V0),
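(A quick sketch of the shift-by-one trick above, registers arbitrary:

    # x << 1 == x + x
    vadd.vv v8, v8, v8      # replaces vsll.vi v8, v8, 1
)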
@@ -7258,11 +7258,11 @@ foreach vti = AllFloatVectors in {
foreach fvti = AllFloatVectors in {
defvar instr = !cast<Instruction>("PseudoVMERGE_VIM_"#fvti.LMul.MX);
let Predicates = GetVTypePredicates<fvti>.Predicates in
- def : Pat<(fvti.Vector (int_riscv_vfmerge (fvti.Vector fvti.RegClass:$merge),
+ def : Pat<(fvti.Vector (int_riscv_vfmerge (fvti.Vector fvti.RegClass:$pt),
(fvti.Vector fvti.RegClass:$rs2),
(fvti.Scalar (fpimm0)),
(fvti.Mask V0), VLOpFrag)),
- (instr fvti.RegClass:$merge, fvti.RegClass:$rs2, 0,
+ (instr fvti.RegClass:$pt, fvti.RegClass:$rs2, 0,
(fvti.Mask V0), GPR:$vl, fvti.Log2SEW)>;
}
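(One aside on the vfmerge pattern just above: +0.0 is the all-zero bit
pattern, so merging it in needs no floating-point scalar at all; the integer
immediate form does the same job (registers arbitrary):

    vmerge.vim v8, v10, 0, v0   # stands in for vfmerge.vfm with a +0.0 scalar
)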
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
index 2ed71f6b88974..ad585d3a46fee 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
@@ -36,7 +36,7 @@ def SDT_RISCVIntBinOp_VL : SDTypeProfile<1, 5, [SDTCisSameAs<0, 1>,
SDTCisSameNumEltsAs<0, 4>,
SDTCisVT<5, XLenVT>]>;
-// Input: (vector, vector/scalar, merge, mask, roundmode, vl)
+// Input: (vector, vector/scalar, passthru, mask, roundmode, vl)
def SDT_RISCVVNBinOp_RM_VL : SDTypeProfile<1, 6, [SDTCisVec<0>, SDTCisInt<0>,
SDTCisSameAs<0, 3>,
SDTCisSameNumEltsAs<0, 1>,
@@ -149,18 +149,18 @@ def riscv_strict_fmul_vl : SDNode<"RISCVISD::STRICT_FMUL_VL", SDT_RISCVFPBinOp
def riscv_strict_fdiv_vl : SDNode<"RISCVISD::STRICT_FDIV_VL", SDT_RISCVFPBinOp_VL, [SDNPHasChain]>;
def riscv_strict_fsqrt_vl : SDNode<"RISCVISD::STRICT_FSQRT_VL", SDT_RISCVFPUnOp_VL, [SDNPHasChain]>;
-def any_riscv_fadd_vl : PatFrags<(ops node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl),
- [(riscv_fadd_vl node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl),
- (riscv_strict_fadd_vl node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl)]>;
-def any_riscv_fsub_vl : PatFrags<(ops node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl),
- [(riscv_fsub_vl node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl),
- (riscv_strict_fsub_vl node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl)]>;
-def any_riscv_fmul_vl : PatFrags<(ops node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl),
- [(riscv_fmul_vl node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl),
- (riscv_strict_fmul_vl node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl)]>;
-def any_riscv_fdiv_vl : PatFrags<(ops node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl),
- [(riscv_fdiv_vl node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl),
- (riscv_strict_fdiv_vl node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl)]>;
+def any_riscv_fadd_vl : PatFrags<(ops node:$lhs, node:$rhs, node:$pt, node:$mask, node:$vl),
+ [(riscv_fadd_vl node:$lhs, node:$rhs, node:$pt, node:$mask, node:$vl),
+ (riscv_strict_fadd_vl node:$lhs, node:$rhs, node:$pt, node:$mask, node:$vl)]>;
+def any_riscv_fsub_vl : PatFrags<(ops node:$lhs, node:$rhs, node:$pt, node:$mask, node:$vl),
+ [(riscv_fsub_vl node:$lhs, node:$rhs, node:$pt, node:$mask, node:$vl),
+ (riscv_strict_fsub_vl node:$lhs, node:$rhs, node:$pt, node:$mask, node:$vl)]>;
+def any_riscv_fmul_vl : PatFrags<(ops node:$lhs, node:$rhs, node:$pt, node:$mask, node:$vl),
+ [(riscv_fmul_vl node:$lhs, node:$rhs, node:$pt, node:$mask, node:$vl),
+ (riscv_strict_fmul_vl node:$lhs, node:$rhs, node:$pt, node:$mask, node:$vl)]>;
+def any_riscv_fdiv_vl : PatFrags<(ops node:$lhs, node:$rhs, node:$pt, node:$mask, node:$vl),
+ [(riscv_fdiv_vl node:$lhs, node:$rhs, node:$pt, node:$mask, node:$vl),
+ (riscv_strict_fdiv_vl node:$lhs, node:$rhs, node:$pt, node:$mask, node:$vl)]>;
def any_riscv_fsqrt_vl : PatFrags<(ops node:$src, node:$mask, node:$vl),
[(riscv_fsqrt_vl node:$src, node:$mask, node:$vl),
(riscv_strict_fsqrt_vl node:$src, node:$mask, node:$vl)]>;
@@ -318,12 +318,12 @@ def any_riscv_vfround_noexcept_vl : PatFrags<(ops node:$src, node:$mask, node:$v
def riscv_setcc_vl : SDNode<"RISCVISD::SETCC_VL", SDT_RISCVSETCCOP_VL>;
def riscv_strict_fsetcc_vl : SDNode<"RISCVISD::STRICT_FSETCC_VL", SDT_RISCVSETCCOP_VL, [SDNPHasChain]>;
def riscv_strict_fsetccs_vl : SDNode<"RISCVISD::STRICT_FSETCCS_VL", SDT_RISCVSETCCOP_VL, [SDNPHasChain]>;
-def any_riscv_fsetcc_vl : PatFrags<(ops node:$lhs, node:$rhs, node:$cc, node:$merge, node:$mask, node:$vl),
- [(riscv_setcc_vl node:$lhs, node:$rhs, node:$cc, node:$merge, node:$mask, node:$vl),
- (riscv_strict_fsetcc_vl node:$lhs, node:$rhs, node:$cc, node:$merge, node:$mask, node:$vl)]>;
-def any_riscv_fsetccs_vl : PatFrags<(ops node:$lhs, node:$rhs, node:$cc, node:$merge, node:$mask, node:$vl),
- [(riscv_setcc_vl node:$lhs, node:$rhs, node:$cc, node:$merge, node:$mask, node:$vl),
- (riscv_strict_fsetccs_vl node:$lhs, node:$rhs, node:$cc, node:$merge, node:$mask, node:$vl)]>;
+def any_riscv_fsetcc_vl : PatFrags<(ops node:$lhs, node:$rhs, node:$cc, node:$pt, node:$mask, node:$vl),
+ [(riscv_setcc_vl node:$lhs, node:$rhs, node:$cc, node:$pt, node:$mask, node:$vl),
+ (riscv_strict_fsetcc_vl node:$lhs, node:$rhs, node:$cc, node:$pt, node:$mask, node:$vl)]>;
+def any_riscv_fsetccs_vl : PatFrags<(ops node:$lhs, node:$rhs, node:$cc, node:$pt, node:$mask, node:$vl),
+ [(riscv_setcc_vl node:$lhs, node:$rhs, node:$cc, node:$pt, node:$mask, node:$vl),
+ (riscv_strict_fsetccs_vl node:$lhs, node:$rhs, node:$cc, node:$pt, node:$mask, node:$vl)]>;
def riscv_vrgather_vx_vl : SDNode<"RISCVISD::VRGATHER_VX_VL",
SDTypeProfile<1, 5, [SDTCisVec<0>,
@@ -640,14 +640,14 @@ class VPatBinaryVL_V<SDPatternOperator vop,
: Pat<(result_type (vop
(op1_type op1_reg_class:$rs1),
(op2_type op2_reg_class:$rs2),
- (result_type result_reg_class:$merge),
+ (result_type result_reg_class:$pt),
(mask_type V0),
VLOpFrag)),
(!cast<Instruction>(
!if(isSEWAware,
instruction_name#"_"#suffix#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK",
instruction_name#"_"#suffix#"_"#vlmul.MX#"_MASK"))
- result_reg_class:$merge,
+ result_reg_class:$pt,
op1_reg_class:$rs1,
op2_reg_class:$rs2,
(mask_type V0), GPR:$vl, log2sew, TAIL_AGNOSTIC)>;
@@ -668,14 +668,14 @@ class VPatBinaryVL_V_RM<SDPatternOperator vop,
: Pat<(result_type (vop
(op1_type op1_reg_class:$rs1),
(op2_type op2_reg_class:$rs2),
- (result_type result_reg_class:$merge),
+ (result_type result_reg_class:$pt),
(mask_type V0),
VLOpFrag)),
(!cast<Instruction>(
!if(isSEWAware,
instruction_name#"_"#suffix#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK",
instruction_name#"_"#suffix#"_"#vlmul.MX#"_MASK"))
- result_reg_class:$merge,
+ result_reg_class:$pt,
op1_reg_class:$rs1,
op2_reg_class:$rs2,
(mask_type V0),
@@ -800,14 +800,14 @@ class VPatBinaryVL_XI<SDPatternOperator vop,
: Pat<(result_type (vop
(vop1_type vop_reg_class:$rs1),
(vop2_type (SplatPatKind (XLenVT xop_kind:$rs2))),
- (result_type result_reg_class:$merge),
+ (result_type result_reg_class:$pt),
(mask_type V0),
VLOpFrag)),
(!cast<Instruction>(
!if(isSEWAware,
instruction_name#_#suffix#_#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK",
instruction_name#_#suffix#_#vlmul.MX#"_MASK"))
- result_reg_class:$merge,
+ result_reg_class:$pt,
vop_reg_class:$rs1,
xop_kind:$rs2,
(mask_type V0), GPR:$vl, log2sew, TAIL_AGNOSTIC)>;
@@ -924,14 +924,14 @@ class VPatBinaryVL_VF<SDPatternOperator vop,
bit isSEWAware = 0>
: Pat<(result_type (vop (vop1_type vop_reg_class:$rs1),
(vop2_type (SplatFPOp scalar_reg_class:$rs2)),
- (result_type result_reg_class:$merge),
+ (result_type result_reg_class:$pt),
(mask_type V0),
VLOpFrag)),
(!cast<Instruction>(
!if(isSEWAware,
instruction_name#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK",
instruction_name#"_"#vlmul.MX#"_MASK"))
- result_reg_class:$merge,
+ result_reg_class:$pt,
vop_reg_class:$rs1,
scalar_reg_class:$rs2,
(mask_type V0), GPR:$vl, log2sew, TAIL_AGNOSTIC)>;
@@ -950,14 +950,14 @@ class VPatBinaryVL_VF_RM<SDPatternOperator vop,
bit isSEWAware = 0>
: Pat<(result_type (vop (vop1_type vop_reg_class:$rs1),
(vop2_type (SplatFPOp scalar_reg_class:$rs2)),
- (result_type result_reg_class:$merge),
+ (result_type result_reg_class:$pt),
(mask_type V0),
VLOpFrag)),
(!cast<Instruction>(
!if(isSEWAware,
instruction_name#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK",
instruction_name#"_"#vlmul.MX#"_MASK"))
- result_reg_class:$merge,
+ result_reg_class:$pt,
vop_reg_class:$rs1,
scalar_reg_class:$rs2,
(mask_type V0),
@@ -1004,14 +1004,14 @@ multiclass VPatBinaryFPVL_R_VF<SDPatternOperator vop, string instruction_name,
let Predicates = GetVTypePredicates<fvti>.Predicates in
def : Pat<(fvti.Vector (vop (SplatFPOp fvti.ScalarRegClass:$rs2),
fvti.RegClass:$rs1,
- (fvti.Vector fvti.RegClass:$merge),
+ (fvti.Vector fvti.RegClass:$pt),
(fvti.Mask V0),
VLOpFrag)),
(!cast<Instruction>(
!if(isSEWAware,
instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_E"#fvti.SEW#"_MASK",
instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_MASK"))
- fvti.RegClass:$merge,
+ fvti.RegClass:$pt,
fvti.RegClass:$rs1, fvti.ScalarRegClass:$rs2,
(fvti.Mask V0), GPR:$vl, fvti.Log2SEW, TAIL_AGNOSTIC)>;
}
@@ -1023,14 +1023,14 @@ multiclass VPatBinaryFPVL_R_VF_RM<SDPatternOperator vop, string instruction_name
let Predicates = GetVTypePredicates<fvti>.Predicates in
def : Pat<(fvti.Vector (vop (SplatFPOp fvti.ScalarRegClass:$rs2),
fvti.RegClass:$rs1,
- (fvti.Vector fvti.RegClass:$merge),
+ (fvti.Vector fvti.RegClass:$pt),
(fvti.Mask V0),
VLOpFrag)),
(!cast<Instruction>(
!if(isSEWAware,
instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_E"#fvti.SEW#"_MASK",
instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_MASK"))
- fvti.RegClass:$merge,
+ fvti.RegClass:$pt,
fvti.RegClass:$rs1, fvti.ScalarRegClass:$rs2,
(fvti.Mask V0),
// Value to indicate no rounding mode change in
@@ -1044,11 +1044,11 @@ multiclass VPatIntegerSetCCVL_VV<VTypeInfo vti, string instruction_name,
CondCode cc> {
def : Pat<(vti.Mask (riscv_setcc_vl (vti.Vector vti.RegClass:$rs1),
vti.RegClass:$rs2, cc,
- VR:$merge,
+ VR:$pt,
(vti.Mask V0),
VLOpFrag)),
(!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX#"_MASK")
- VR:$merge,
+ VR:$pt,
vti.RegClass:$rs1,
vti.RegClass:$rs2,
(vti.Mask V0), GPR:$vl, vti.Log2SEW)>;
@@ -1060,11 +1060,11 @@ multiclass VPatIntegerSetCCVL_VV_Swappable<VTypeInfo vti, string instruction_nam
: VPatIntegerSetCCVL_VV<vti, instruction_name, cc> {
def : Pat<(vti.Mask (riscv_setcc_vl (vti.Vector vti.RegClass:$rs2),
vti.RegClass:$rs1, invcc,
- VR:$merge,
+ VR:$pt,
(vti.Mask V0),
VLOpFrag)),
(!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX#"_MASK")
- VR:$merge, vti.RegClass:$rs1,
+ VR:$pt, vti.RegClass:$rs1,
vti.RegClass:$rs2, (vti.Mask V0), GPR:$vl, vti.Log2SEW)>;
}
@@ -1073,17 +1073,17 @@ multiclass VPatIntegerSetCCVL_VX_Swappable<VTypeInfo vti, string instruction_nam
defvar instruction_masked = !cast<Instruction>(instruction_name#"_VX_"#vti.LMul.MX#"_MASK");
def : Pat<(vti.Mask (riscv_setcc_vl (vti.Vector vti.RegClass:$rs1),
(SplatPat (XLenVT GPR:$rs2)), cc,
- VR:$merge,
+ VR:$pt,
(vti.Mask V0),
VLOpFrag)),
- (instruction_masked VR:$merge, vti.RegClass:$rs1,
+ (instruction_masked VR:$pt, vti.RegClass:$rs1,
GPR:$rs2, (vti.Mask V0), GPR:$vl, vti.Log2SEW)>;
def : Pat<(vti.Mask (riscv_setcc_vl (SplatPat (XLenVT GPR:$rs2)),
(vti.Vector vti.RegClass:$rs1), invcc,
- VR:$merge,
+ VR:$pt,
(vti.Mask V0),
VLOpFrag)),
- (instruction_masked VR:$merge, vti.RegClass:$rs1,
+ (instruction_masked VR:$pt, vti.RegClass:$rs1,
GPR:$rs2, (vti.Mask V0), GPR:$vl, vti.Log2SEW)>;
}
@@ -1092,20 +1092,20 @@ multiclass VPatIntegerSetCCVL_VI_Swappable<VTypeInfo vti, string instruction_nam
defvar instruction_masked = !cast<Instruction>(instruction_name#"_VI_"#vti.LMul.MX#"_MASK");
def : Pat<(vti.Mask (riscv_setcc_vl (vti.Vector vti.RegClass:$rs1),
(SplatPat_simm5 simm5:$rs2), cc,
- VR:$merge,
+ VR:$pt,
(vti.Mask V0),
VLOpFrag)),
- (instruction_masked VR:$merge, vti.RegClass:$rs1,
+ (instruction_masked VR:$pt, vti.RegClass:$rs1,
XLenVT:$rs2, (vti.Mask V0), GPR:$vl,
vti.Log2SEW)>;
// FIXME: Can do some canonicalization to remove these patterns.
def : Pat<(vti.Mask (riscv_setcc_vl (SplatPat_simm5 simm5:$rs2),
(vti.Vector vti.RegClass:$rs1), invcc,
- VR:$merge,
+ VR:$pt,
(vti.Mask V0),
VLOpFrag)),
- (instruction_masked VR:$merge, vti.RegClass:$rs1,
+ (instruction_masked VR:$pt, vti.RegClass:$rs1,
simm5:$rs2, (vti.Mask V0), GPR:$vl,
vti.Log2SEW)>;
}
@@ -1117,20 +1117,20 @@ multiclass VPatIntegerSetCCVL_VIPlus1_Swappable<VTypeInfo vti,
defvar instruction_masked = !cast<Instruction>(instruction_name#"_VI_"#vti.LMul.MX#"_MASK");
def : Pat<(vti.Mask (riscv_setcc_vl (vti.Vector vti.RegClass:$rs1),
(splatpat_kind simm5:$rs2), cc,
- VR:$merge,
+ VR:$pt,
(vti.Mask V0),
VLOpFrag)),
- (instruction_masked VR:$merge, vti.RegClass:$rs1,
+ (instruction_masked VR:$pt, vti.RegClass:$rs1,
(DecImm simm5:$rs2), (vti.Mask V0), GPR:$vl,
vti.Log2SEW)>;
// FIXME: Can do some canonicalization to remove these patterns.
def : Pat<(vti.Mask (riscv_setcc_vl (splatpat_kind simm5:$rs2),
(vti.Vector vti.RegClass:$rs1), invcc,
- VR:$merge,
+ VR:$pt,
(vti.Mask V0),
VLOpFrag)),
- (instruction_masked VR:$merge, vti.RegClass:$rs1,
+ (instruction_masked VR:$pt, vti.RegClass:$rs1,
(DecImm simm5:$rs2), (vti.Mask V0), GPR:$vl,
vti.Log2SEW)>;
}
@@ -1143,31 +1143,31 @@ multiclass VPatFPSetCCVL_VV_VF_FV<SDPatternOperator vop, CondCode cc,
def : Pat<(fvti.Mask (vop (fvti.Vector fvti.RegClass:$rs1),
fvti.RegClass:$rs2,
cc,
- VR:$merge,
+ VR:$pt,
(fvti.Mask V0),
VLOpFrag)),
(!cast<Instruction>(inst_name#"_VV_"#fvti.LMul.MX#"_MASK")
- VR:$merge, fvti.RegClass:$rs1,
+ VR:$pt, fvti.RegClass:$rs1,
fvti.RegClass:$rs2, (fvti.Mask V0),
GPR:$vl, fvti.Log2SEW)>;
def : Pat<(fvti.Mask (vop (fvti.Vector fvti.RegClass:$rs1),
(SplatFPOp fvti.ScalarRegClass:$rs2),
cc,
- VR:$merge,
+ VR:$pt,
(fvti.Mask V0),
VLOpFrag)),
(!cast<Instruction>(inst_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_MASK")
- VR:$merge, fvti.RegClass:$rs1,
+ VR:$pt, fvti.RegClass:$rs1,
fvti.ScalarRegClass:$rs2, (fvti.Mask V0),
GPR:$vl, fvti.Log2SEW)>;
def : Pat<(fvti.Mask (vop (SplatFPOp fvti.ScalarRegClass:$rs2),
(fvti.Vector fvti.RegClass:$rs1),
cc,
- VR:$merge,
+ VR:$pt,
(fvti.Mask V0),
VLOpFrag)),
(!cast<Instruction>(swapped_op_inst_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_MASK")
- VR:$merge, fvti.RegClass:$rs1,
+ VR:$pt, fvti.RegClass:$rs1,
fvti.ScalarRegClass:$rs2, (fvti.Mask V0),
GPR:$vl, fvti.Log2SEW)>;
}
@@ -1437,12 +1437,12 @@ multiclass VPatReductionVL<SDNode vop, string instruction_name, bit is_float> {
foreach vti = !if(is_float, AllFloatVectors, AllIntegerVectors) in {
defvar vti_m1 = !cast<VTypeInfo>(!if(is_float, "VF", "VI") # vti.SEW # "M1");
let Predicates = GetVTypePredicates<vti>.Predicates in {
- def: Pat<(vti_m1.Vector (vop (vti_m1.Vector VR:$merge),
+ def: Pat<(vti_m1.Vector (vop (vti_m1.Vector VR:$pt),
(vti.Vector vti.RegClass:$rs1), VR:$rs2,
(vti.Mask V0), VLOpFrag,
(XLenVT timm:$policy))),
(!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX#"_E"#vti.SEW#"_MASK")
- (vti_m1.Vector VR:$merge),
+ (vti_m1.Vector VR:$pt),
(vti.Vector vti.RegClass:$rs1),
(vti_m1.Vector VR:$rs2),
(vti.Mask V0), GPR:$vl, vti.Log2SEW, (XLenVT timm:$policy))>;
@@ -1454,12 +1454,12 @@ multiclass VPatReductionVL_RM<SDNode vop, string instruction_name, bit is_float>
foreach vti = !if(is_float, AllFloatVectors, AllIntegerVectors) in {
defvar vti_m1 = !cast<VTypeInfo>(!if(is_float, "VF", "VI") # vti.SEW # "M1");
let Predicates = GetVTypePredicates<vti>.Predicates in {
- def: Pat<(vti_m1.Vector (vop (vti_m1.Vector VR:$merge),
+ def: Pat<(vti_m1.Vector (vop (vti_m1.Vector VR:$pt),
(vti.Vector vti.RegClass:$rs1), VR:$rs2,
(vti.Mask V0), VLOpFrag,
(XLenVT timm:$policy))),
(!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX#"_E"#vti.SEW#"_MASK")
- (vti_m1.Vector VR:$merge),
+ (vti_m1.Vector VR:$pt),
(vti.Vector vti.RegClass:$rs1),
(vti_m1.Vector VR:$rs2),
(vti.Mask V0),
@@ -1519,12 +1519,12 @@ multiclass VPatWidenReductionVL<SDNode vop, PatFrags extop, string instruction_n
defvar wti_m1 = !cast<VTypeInfo>(!if(is_float, "VF", "VI") # wti.SEW # "M1");
let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
GetVTypePredicates<wti>.Predicates) in {
- def: Pat<(wti_m1.Vector (vop (wti_m1.Vector VR:$merge),
+ def: Pat<(wti_m1.Vector (vop (wti_m1.Vector VR:$pt),
(wti.Vector (extop (vti.Vector vti.RegClass:$rs1))),
VR:$rs2, (vti.Mask V0), VLOpFrag,
(XLenVT timm:$policy))),
(!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX#"_E"#vti.SEW#"_MASK")
- (wti_m1.Vector VR:$merge), (vti.Vector vti.RegClass:$rs1),
+ (wti_m1.Vector VR:$pt), (vti.Vector vti.RegClass:$rs1),
(wti_m1.Vector VR:$rs2), (vti.Mask V0), GPR:$vl, vti.Log2SEW,
(XLenVT timm:$policy))>;
}
@@ -1538,12 +1538,12 @@ multiclass VPatWidenReductionVL_RM<SDNode vop, PatFrags extop, string instructio
defvar wti_m1 = !cast<VTypeInfo>(!if(is_float, "VF", "VI") # wti.SEW # "M1");
let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
GetVTypePredicates<wti>.Predicates) in {
- def: Pat<(wti_m1.Vector (vop (wti_m1.Vector VR:$merge),
+ def: Pat<(wti_m1.Vector (vop (wti_m1.Vector VR:$pt),
(wti.Vector (extop (vti.Vector vti.RegClass:$rs1))),
VR:$rs2, (vti.Mask V0), VLOpFrag,
(XLenVT timm:$policy))),
(!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX#"_E"#vti.SEW#"_MASK")
- (wti_m1.Vector VR:$merge), (vti.Vector vti.RegClass:$rs1),
+ (wti_m1.Vector VR:$pt), (vti.Vector vti.RegClass:$rs1),
(wti_m1.Vector VR:$rs2), (vti.Mask V0),
// Value to indicate no rounding mode change in
// RISCVInsertReadWriteCSR
@@ -1561,12 +1561,12 @@ multiclass VPatWidenReductionVL_Ext_VL<SDNode vop, PatFrags extop, string instru
defvar wti_m1 = !cast<VTypeInfo>(!if(is_float, "VF", "VI") # wti.SEW # "M1");
let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
GetVTypePredicates<wti>.Predicates) in {
- def: Pat<(wti_m1.Vector (vop (wti_m1.Vector VR:$merge),
+ def: Pat<(wti_m1.Vector (vop (wti_m1.Vector VR:$pt),
(wti.Vector (extop (vti.Vector vti.RegClass:$rs1), (vti.Mask true_mask), VLOpFrag)),
VR:$rs2, (vti.Mask V0), VLOpFrag,
(XLenVT timm:$policy))),
(!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX#"_E"#vti.SEW#"_MASK")
- (wti_m1.Vector VR:$merge), (vti.Vector vti.RegClass:$rs1),
+ (wti_m1.Vector VR:$pt), (vti.Vector vti.RegClass:$rs1),
(wti_m1.Vector VR:$rs2), (vti.Mask V0), GPR:$vl, vti.Log2SEW,
(XLenVT timm:$policy))>;
}
@@ -1580,12 +1580,12 @@ multiclass VPatWidenReductionVL_Ext_VL_RM<SDNode vop, PatFrags extop, string ins
defvar wti_m1 = !cast<VTypeInfo>(!if(is_float, "VF", "VI") # wti.SEW # "M1");
let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
GetVTypePredicates<wti>.Predicates) in {
- def: Pat<(wti_m1.Vector (vop (wti_m1.Vector VR:$merge),
+ def: Pat<(wti_m1.Vector (vop (wti_m1.Vector VR:$pt),
(wti.Vector (extop (vti.Vector vti.RegClass:$rs1), (vti.Mask true_mask), VLOpFrag)),
VR:$rs2, (vti.Mask V0), VLOpFrag,
(XLenVT timm:$policy))),
(!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX#"_E"#vti.SEW#"_MASK")
- (wti_m1.Vector VR:$merge), (vti.Vector vti.RegClass:$rs1),
+ (wti_m1.Vector VR:$pt), (vti.Vector vti.RegClass:$rs1),
(wti_m1.Vector VR:$rs2), (vti.Mask V0),
// Value to indicate no rounding mode change in
// RISCVInsertReadWriteCSR
@@ -2098,15 +2098,15 @@ multiclass VPatAVGADDVL_VV_VX_RM<SDNode vop, int vxrm, string suffix = ""> {
let Predicates = GetVTypePredicates<vti>.Predicates in {
def : Pat<(vop (vti.Vector vti.RegClass:$rs1),
(vti.Vector vti.RegClass:$rs2),
- vti.RegClass:$merge, (vti.Mask V0), VLOpFrag),
+ vti.RegClass:$pt, (vti.Mask V0), VLOpFrag),
(!cast<Instruction>("PseudoVAADD"#suffix#"_VV_"#vti.LMul.MX#"_MASK")
- vti.RegClass:$merge, vti.RegClass:$rs1, vti.RegClass:$rs2,
+ vti.RegClass:$pt, vti.RegClass:$rs1, vti.RegClass:$rs2,
(vti.Mask V0), vxrm, GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
def : Pat<(vop (vti.Vector vti.RegClass:$rs1),
(vti.Vector (SplatPat (XLenVT GPR:$rs2))),
- vti.RegClass:$merge, (vti.Mask V0), VLOpFrag),
+ vti.RegClass:$pt, (vti.Mask V0), VLOpFrag),
(!cast<Instruction>("PseudoVAADD"#suffix#"_VX_"#vti.LMul.MX#"_MASK")
- vti.RegClass:$merge, vti.RegClass:$rs1, GPR:$rs2,
+ vti.RegClass:$pt, vti.RegClass:$rs1, GPR:$rs2,
(vti.Mask V0), vxrm, GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
}
}
@@ -2127,15 +2127,15 @@ foreach vti = AllIntegerVectors in {
let Predicates = GetVTypePredicates<vti>.Predicates in {
def : Pat<(riscv_sub_vl (vti.Vector (SplatPat (XLenVT GPR:$rs2))),
(vti.Vector vti.RegClass:$rs1),
- vti.RegClass:$merge, (vti.Mask V0), VLOpFrag),
+ vti.RegClass:$pt, (vti.Mask V0), VLOpFrag),
(!cast<Instruction>("PseudoVRSUB_VX_"# vti.LMul.MX#"_MASK")
- vti.RegClass:$merge, vti.RegClass:$rs1, GPR:$rs2,
+ vti.RegClass:$pt, vti.RegClass:$rs1, GPR:$rs2,
(vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
def : Pat<(riscv_sub_vl (vti.Vector (SplatPat_simm5 simm5:$rs2)),
(vti.Vector vti.RegClass:$rs1),
- vti.RegClass:$merge, (vti.Mask V0), VLOpFrag),
+ vti.RegClass:$pt, (vti.Mask V0), VLOpFrag),
(!cast<Instruction>("PseudoVRSUB_VI_"# vti.LMul.MX#"_MASK")
- vti.RegClass:$merge, vti.RegClass:$rs1, simm5:$rs2,
+ vti.RegClass:$pt, vti.RegClass:$rs1, simm5:$rs2,
(vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
}
}
@@ -2157,18 +2157,18 @@ foreach vtiToWti = AllWidenableIntVectors in {
(vti.Mask V0), VLOpFrag)),
(wti.Vector (riscv_vmv_v_x_vl
(wti.Vector undef), 1, VLOpFrag)),
- wti.RegClass:$merge, (vti.Mask V0), VLOpFrag),
+ wti.RegClass:$pt, (vti.Mask V0), VLOpFrag),
(!cast<Instruction>("PseudoVWADD_VV_"#vti.LMul.MX#"_MASK")
- wti.RegClass:$merge, vti.RegClass:$rs1, vti.RegClass:$rs1,
+ wti.RegClass:$pt, vti.RegClass:$rs1, vti.RegClass:$rs1,
(vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
def : Pat<(riscv_shl_vl (wti.Vector (riscv_zext_vl_oneuse
(vti.Vector vti.RegClass:$rs1),
(vti.Mask V0), VLOpFrag)),
(wti.Vector (riscv_vmv_v_x_vl
(wti.Vector undef), 1, VLOpFrag)),
- wti.RegClass:$merge, (vti.Mask V0), VLOpFrag),
+ wti.RegClass:$pt, (vti.Mask V0), VLOpFrag),
(!cast<Instruction>("PseudoVWADDU_VV_"#vti.LMul.MX#"_MASK")
- wti.RegClass:$merge, vti.RegClass:$rs1, vti.RegClass:$rs1,
+ wti.RegClass:$pt, vti.RegClass:$rs1, vti.RegClass:$rs1,
(vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
}
}
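(These are the widening form of the shift-by-one-is-an-add trick seen
earlier: shifting a sign- or zero-extended value left by one is a widening
add of the narrow source with itself, roughly (registers arbitrary):

    vwadd.vv  v8, v4, v4, v0.t   # (sext(x) << 1), signed
    vwaddu.vv v8, v4, v4, v0.t   # (zext(x) << 1), unsigned
)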
@@ -2333,28 +2333,28 @@ foreach vti = AllIntegerVectors in {
def : Pat<(vti.Vector (riscv_vmerge_vl (vti.Mask V0),
vti.RegClass:$rs1,
vti.RegClass:$rs2,
- vti.RegClass:$merge,
+ vti.RegClass:$pt,
VLOpFrag)),
(!cast<Instruction>("PseudoVMERGE_VVM_"#vti.LMul.MX)
- vti.RegClass:$merge, vti.RegClass:$rs2, vti.RegClass:$rs1,
+ vti.RegClass:$pt, vti.RegClass:$rs2, vti.RegClass:$rs1,
(vti.Mask V0), GPR:$vl, vti.Log2SEW)>;
def : Pat<(vti.Vector (riscv_vmerge_vl (vti.Mask V0),
(SplatPat XLenVT:$rs1),
vti.RegClass:$rs2,
- vti.RegClass:$merge,
+ vti.RegClass:$pt,
VLOpFrag)),
(!cast<Instruction>("PseudoVMERGE_VXM_"#vti.LMul.MX)
- vti.RegClass:$merge, vti.RegClass:$rs2, GPR:$rs1,
+ vti.RegClass:$pt, vti.RegClass:$rs2, GPR:$rs1,
(vti.Mask V0), GPR:$vl, vti.Log2SEW)>;
def : Pat<(vti.Vector (riscv_vmerge_vl (vti.Mask V0),
(SplatPat_simm5 simm5:$rs1),
vti.RegClass:$rs2,
- vti.RegClass:$merge,
+ vti.RegClass:$pt,
VLOpFrag)),
(!cast<Instruction>("PseudoVMERGE_VIM_"#vti.LMul.MX)
- vti.RegClass:$merge, vti.RegClass:$rs2, simm5:$rs1,
+ vti.RegClass:$pt, vti.RegClass:$rs2, simm5:$rs1,
(vti.Mask V0), GPR:$vl, vti.Log2SEW)>;
}
}
@@ -2505,11 +2505,11 @@ foreach vti = AllFloatVectors in {
def : Pat<(riscv_fcopysign_vl (vti.Vector vti.RegClass:$rs1),
(vti.Vector vti.RegClass:$rs2),
- vti.RegClass:$merge,
+ vti.RegClass:$pt,
(vti.Mask V0),
VLOpFrag),
(!cast<Instruction>("PseudoVFSGNJ_VV_"# vti.LMul.MX#"_E"#vti.SEW#"_MASK")
- vti.RegClass:$merge, vti.RegClass:$rs1,
+ vti.RegClass:$pt, vti.RegClass:$rs1,
vti.RegClass:$rs2, (vti.Mask V0), GPR:$vl, vti.Log2SEW,
TAIL_AGNOSTIC)>;
@@ -2526,11 +2526,11 @@ foreach vti = AllFloatVectors in {
def : Pat<(riscv_fcopysign_vl (vti.Vector vti.RegClass:$rs1),
(SplatFPOp vti.ScalarRegClass:$rs2),
- vti.RegClass:$merge,
+ vti.RegClass:$pt,
(vti.Mask V0),
VLOpFrag),
(!cast<Instruction>("PseudoVFSGNJ_V"#vti.ScalarSuffix#"_"# vti.LMul.MX#"_E"#vti.SEW#"_MASK")
- vti.RegClass:$merge, vti.RegClass:$rs1,
+ vti.RegClass:$pt, vti.RegClass:$rs1,
vti.ScalarRegClass:$rs2, (vti.Mask V0), GPR:$vl, vti.Log2SEW,
TAIL_AGNOSTIC)>;
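(For context, copysign maps directly onto the sign-injection instruction,
which takes the magnitude from one source and the sign from the other
(registers arbitrary):

    vfsgnj.vv v8, v10, v12, v0.t   # v8[i] = sign of v12[i], magnitude of v10[i]
)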
@@ -2559,29 +2559,29 @@ foreach fvti = !listconcat(AllFloatVectors, AllBFloatVectors) in {
def : Pat<(fvti.Vector (riscv_vmerge_vl (fvti.Mask V0),
fvti.RegClass:$rs1,
fvti.RegClass:$rs2,
- fvti.RegClass:$merge,
+ fvti.RegClass:$pt,
VLOpFrag)),
(!cast<Instruction>("PseudoVMERGE_VVM_"#fvti.LMul.MX)
- fvti.RegClass:$merge, fvti.RegClass:$rs2, fvti.RegClass:$rs1, (fvti.Mask V0),
+ fvti.RegClass:$pt, fvti.RegClass:$rs2, fvti.RegClass:$rs1, (fvti.Mask V0),
GPR:$vl, fvti.Log2SEW)>;
def : Pat<(fvti.Vector (riscv_vmerge_vl (fvti.Mask V0),
(SplatFPOp (SelectFPImm (XLenVT GPR:$imm))),
fvti.RegClass:$rs2,
- fvti.RegClass:$merge,
+ fvti.RegClass:$pt,
VLOpFrag)),
(!cast<Instruction>("PseudoVMERGE_VXM_"#fvti.LMul.MX)
- fvti.RegClass:$merge, fvti.RegClass:$rs2, GPR:$imm, (fvti.Mask V0),
+ fvti.RegClass:$pt, fvti.RegClass:$rs2, GPR:$imm, (fvti.Mask V0),
GPR:$vl, fvti.Log2SEW)>;
def : Pat<(fvti.Vector (riscv_vmerge_vl (fvti.Mask V0),
(SplatFPOp (fvti.Scalar fpimm0)),
fvti.RegClass:$rs2,
- fvti.RegClass:$merge,
+ fvti.RegClass:$pt,
VLOpFrag)),
(!cast<Instruction>("PseudoVMERGE_VIM_"#fvti.LMul.MX)
- fvti.RegClass:$merge, fvti.RegClass:$rs2, 0, (fvti.Mask V0),
+ fvti.RegClass:$pt, fvti.RegClass:$rs2, 0, (fvti.Mask V0),
GPR:$vl, fvti.Log2SEW)>;
}
}
@@ -2591,10 +2591,10 @@ foreach fvti = AllFloatVectors in {
def : Pat<(fvti.Vector (riscv_vmerge_vl (fvti.Mask V0),
(SplatFPOp fvti.ScalarRegClass:$rs1),
fvti.RegClass:$rs2,
- fvti.RegClass:$merge,
+ fvti.RegClass:$pt,
VLOpFrag)),
(!cast<Instruction>("PseudoVFMERGE_V"#fvti.ScalarSuffix#"M_"#fvti.LMul.MX)
- fvti.RegClass:$merge, fvti.RegClass:$rs2,
+ fvti.RegClass:$pt, fvti.RegClass:$rs2,
(fvti.Scalar fvti.ScalarRegClass:$rs1),
(fvti.Mask V0), GPR:$vl, fvti.Log2SEW)>;
}
@@ -2866,10 +2866,10 @@ foreach mti = AllMasks in {
// 16.1. Integer Scalar Move Instructions
foreach vti = NoGroupIntegerVectors in {
let Predicates = GetVTypePredicates<vti>.Predicates in {
- def : Pat<(vti.Vector (riscv_vmv_s_x_vl (vti.Vector vti.RegClass:$merge),
+ def : Pat<(vti.Vector (riscv_vmv_s_x_vl (vti.Vector vti.RegClass:$pt),
vti.ScalarRegClass:$rs1,
VLOpFrag)),
- (PseudoVMV_S_X $merge, vti.ScalarRegClass:$rs1, GPR:$vl,
+ (PseudoVMV_S_X $pt, vti.ScalarRegClass:$rs1, GPR:$vl,
vti.Log2SEW)>;
}
}
@@ -2879,26 +2879,26 @@ foreach vti = AllIntegerVectors in {
let Predicates = GetVTypePredicates<vti>.Predicates in {
def : Pat<(vti.Vector (riscv_vrgather_vv_vl vti.RegClass:$rs2,
vti.RegClass:$rs1,
- vti.RegClass:$merge,
+ vti.RegClass:$pt,
(vti.Mask V0),
VLOpFrag)),
(!cast<Instruction>("PseudoVRGATHER_VV_"# vti.LMul.MX#"_E"# vti.SEW#"_MASK")
- vti.RegClass:$merge, vti.RegClass:$rs2, vti.RegClass:$rs1,
+ vti.RegClass:$pt, vti.RegClass:$rs2, vti.RegClass:$rs1,
(vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
def : Pat<(vti.Vector (riscv_vrgather_vx_vl vti.RegClass:$rs2, GPR:$rs1,
- vti.RegClass:$merge,
+ vti.RegClass:$pt,
(vti.Mask V0),
VLOpFrag)),
(!cast<Instruction>("PseudoVRGATHER_VX_"# vti.LMul.MX#"_MASK")
- vti.RegClass:$merge, vti.RegClass:$rs2, GPR:$rs1,
+ vti.RegClass:$pt, vti.RegClass:$rs2, GPR:$rs1,
(vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
def : Pat<(vti.Vector (riscv_vrgather_vx_vl vti.RegClass:$rs2,
uimm5:$imm,
- vti.RegClass:$merge,
+ vti.RegClass:$pt,
(vti.Mask V0),
VLOpFrag)),
(!cast<Instruction>("PseudoVRGATHER_VI_"# vti.LMul.MX#"_MASK")
- vti.RegClass:$merge, vti.RegClass:$rs2, uimm5:$imm,
+ vti.RegClass:$pt, vti.RegClass:$rs2, uimm5:$imm,
(vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
}
@@ -2914,11 +2914,11 @@ foreach vti = AllIntegerVectors in {
def : Pat<(vti.Vector
(riscv_vrgatherei16_vv_vl vti.RegClass:$rs2,
(ivti.Vector ivti.RegClass:$rs1),
- vti.RegClass:$merge,
+ vti.RegClass:$pt,
(vti.Mask V0),
VLOpFrag)),
(!cast<Instruction>(inst#"_MASK")
- vti.RegClass:$merge, vti.RegClass:$rs2, ivti.RegClass:$rs1,
+ vti.RegClass:$pt, vti.RegClass:$rs2, ivti.RegClass:$rs1,
(vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
}
}
@@ -2926,24 +2926,24 @@ foreach vti = AllIntegerVectors in {
// 16.2. Floating-Point Scalar Move Instructions
foreach vti = NoGroupFloatVectors in {
let Predicates = GetVTypePredicates<vti>.Predicates in {
- def : Pat<(vti.Vector (riscv_vfmv_s_f_vl (vti.Vector vti.RegClass:$merge),
+ def : Pat<(vti.Vector (riscv_vfmv_s_f_vl (vti.Vector vti.RegClass:$pt),
(vti.Scalar (fpimm0)),
VLOpFrag)),
- (PseudoVMV_S_X $merge, (XLenVT X0), GPR:$vl, vti.Log2SEW)>;
- def : Pat<(vti.Vector (riscv_vfmv_s_f_vl (vti.Vector vti.RegClass:$merge),
+ (PseudoVMV_S_X $pt, (XLenVT X0), GPR:$vl, vti.Log2SEW)>;
+ def : Pat<(vti.Vector (riscv_vfmv_s_f_vl (vti.Vector vti.RegClass:$pt),
(vti.Scalar (SelectFPImm (XLenVT GPR:$imm))),
VLOpFrag)),
- (PseudoVMV_S_X $merge, GPR:$imm, GPR:$vl, vti.Log2SEW)>;
+ (PseudoVMV_S_X $pt, GPR:$imm, GPR:$vl, vti.Log2SEW)>;
}
}
foreach vti = AllFloatVectors in {
let Predicates = GetVTypePredicates<vti>.Predicates in {
- def : Pat<(vti.Vector (riscv_vfmv_s_f_vl (vti.Vector vti.RegClass:$merge),
+ def : Pat<(vti.Vector (riscv_vfmv_s_f_vl (vti.Vector vti.RegClass:$pt),
vti.ScalarRegClass:$rs1,
VLOpFrag)),
(!cast<Instruction>("PseudoVFMV_S_"#vti.ScalarSuffix#"_"#vti.LMul.MX)
- vti.RegClass:$merge,
+ vti.RegClass:$pt,
(vti.Scalar vti.ScalarRegClass:$rs1), GPR:$vl, vti.Log2SEW)>;
}
defvar ivti = GetIntVTypeInfo<vti>.Vti;
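(The fpimm0 cases above lean on the same fact as the vfmerge.vim aside
earlier: +0.0 is all zero bits, so the integer scalar move with x0 provides
it for free:

    vmv.s.x v8, zero    # stands in for vfmv.s.f v8 with a +0.0 scalar
)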
@@ -2951,27 +2951,27 @@ foreach vti = AllFloatVectors in {
def : Pat<(vti.Vector
(riscv_vrgather_vv_vl vti.RegClass:$rs2,
(ivti.Vector vti.RegClass:$rs1),
- vti.RegClass:$merge,
+ vti.RegClass:$pt,
(vti.Mask V0),
VLOpFrag)),
(!cast<Instruction>("PseudoVRGATHER_VV_"# vti.LMul.MX#"_E"# vti.SEW#"_MASK")
- vti.RegClass:$merge, vti.RegClass:$rs2, vti.RegClass:$rs1,
+ vti.RegClass:$pt, vti.RegClass:$rs2, vti.RegClass:$rs1,
(vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
def : Pat<(vti.Vector (riscv_vrgather_vx_vl vti.RegClass:$rs2, GPR:$rs1,
- vti.RegClass:$merge,
+ vti.RegClass:$pt,
(vti.Mask V0),
VLOpFrag)),
(!cast<Instruction>("PseudoVRGATHER_VX_"# vti.LMul.MX#"_MASK")
- vti.RegClass:$merge, vti.RegClass:$rs2, GPR:$rs1,
+ vti.RegClass:$pt, vti.RegClass:$rs2, GPR:$rs1,
(vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
def : Pat<(vti.Vector
(riscv_vrgather_vx_vl vti.RegClass:$rs2,
uimm5:$imm,
- vti.RegClass:$merge,
+ vti.RegClass:$pt,
(vti.Mask V0),
VLOpFrag)),
(!cast<Instruction>("PseudoVRGATHER_VI_"# vti.LMul.MX#"_MASK")
- vti.RegClass:$merge, vti.RegClass:$rs2, uimm5:$imm,
+ vti.RegClass:$pt, vti.RegClass:$rs2, uimm5:$imm,
(vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
}
@@ -2987,11 +2987,11 @@ foreach vti = AllFloatVectors in {
def : Pat<(vti.Vector
(riscv_vrgatherei16_vv_vl vti.RegClass:$rs2,
(ivti.Vector ivti.RegClass:$rs1),
- vti.RegClass:$merge,
+ vti.RegClass:$pt,
(vti.Mask V0),
VLOpFrag)),
(!cast<Instruction>(inst#"_MASK")
- vti.RegClass:$merge, vti.RegClass:$rs2, ivti.RegClass:$rs1,
+ vti.RegClass:$pt, vti.RegClass:$rs2, ivti.RegClass:$rs1,
(vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
}
}
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td
index 75fcc1e7cb110..d8a3ed3acd4ef 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td
@@ -349,7 +349,7 @@ multiclass VPseudoVAESKF1 {
defvar mx = m.MX;
defm _VI : VPseudoBinaryNoMaskPolicy_Zvk<m.vrclass, m.vrclass, uimm5, m>,
SchedBinary<"WriteVAESKF1V", "ReadVAESKF1V", "ReadVAESKF1V", mx,
- forceMergeOpRead=true>;
+ forcePassthruRead=true>;
}
}
@@ -384,7 +384,7 @@ multiclass VPseudoVSM4K {
defvar mx = m.MX;
defm _VI : VPseudoBinaryNoMaskPolicy_Zvk<m.vrclass, m.vrclass, uimm5, m>,
SchedBinary<"WriteVSM4KV", "ReadVSM4KV", "ReadVSM4KV", mx,
- forceMergeOpRead=true>;
+ forcePassthruRead=true>;
}
}
@@ -393,7 +393,7 @@ multiclass VPseudoVSM3ME {
defvar mx = m.MX;
defm _VV : VPseudoBinaryNoMaskPolicy_Zvk<m.vrclass, m.vrclass, m.vrclass, m>,
SchedBinary<"WriteVSM3MEV", "ReadVSM3MEV", "ReadVSM3MEV", mx,
- forceMergeOpRead=true>;
+ forcePassthruRead=true>;
}
}
@@ -402,10 +402,10 @@ multiclass VPseudoVCLMUL_VV_VX {
defvar mx = m.MX;
defm "" : VPseudoBinaryV_VV<m>,
SchedBinary<"WriteVCLMULV", "ReadVCLMULV", "ReadVCLMULV", mx,
- forceMergeOpRead=true>;
+ forcePassthruRead=true>;
defm "" : VPseudoBinaryV_VX<m>,
SchedBinary<"WriteVCLMULX", "ReadVCLMULV", "ReadVCLMULX", mx,
- forceMergeOpRead=true>;
+ forcePassthruRead=true>;
}
}
@@ -422,7 +422,7 @@ multiclass VPseudoVBREV {
foreach m = MxList in {
defvar mx = m.MX;
defm "" : VPseudoUnaryV_V<m>,
- SchedUnary<"WriteVBREVV", "ReadVBREVV", mx, forceMergeOpRead=true>;
+ SchedUnary<"WriteVBREVV", "ReadVBREVV", mx, forcePassthruRead=true>;
}
}
@@ -430,7 +430,7 @@ multiclass VPseudoVCLZ {
foreach m = MxList in {
defvar mx = m.MX;
defm "" : VPseudoUnaryV_V<m>,
- SchedUnary<"WriteVCLZV", "ReadVCLZV", mx, forceMergeOpRead=true>;
+ SchedUnary<"WriteVCLZV", "ReadVCLZV", mx, forcePassthruRead=true>;
}
}
@@ -438,7 +438,7 @@ multiclass VPseudoVCTZ {
foreach m = MxList in {
defvar mx = m.MX;
defm "" : VPseudoUnaryV_V<m>,
- SchedUnary<"WriteVCTZV", "ReadVCTZV", mx, forceMergeOpRead=true>;
+ SchedUnary<"WriteVCTZV", "ReadVCTZV", mx, forcePassthruRead=true>;
}
}
@@ -446,7 +446,7 @@ multiclass VPseudoVCPOP {
foreach m = MxList in {
defvar mx = m.MX;
defm "" : VPseudoUnaryV_V<m>,
- SchedUnary<"WriteVCPOPV", "ReadVCPOPV", mx, forceMergeOpRead=true>;
+ SchedUnary<"WriteVCPOPV", "ReadVCPOPV", mx, forcePassthruRead=true>;
}
}
@@ -455,13 +455,13 @@ multiclass VPseudoVWSLL {
defvar mx = m.MX;
defm "" : VPseudoBinaryW_VV<m>,
SchedBinary<"WriteVWSLLV", "ReadVWSLLV", "ReadVWSLLV", mx,
- forceMergeOpRead=true>;
+ forcePassthruRead=true>;
defm "" : VPseudoBinaryW_VX<m>,
SchedBinary<"WriteVWSLLX", "ReadVWSLLV", "ReadVWSLLX", mx,
- forceMergeOpRead=true>;
+ forcePassthruRead=true>;
defm "" : VPseudoBinaryW_VI<uimm5, m>,
SchedUnary<"WriteVWSLLI", "ReadVWSLLV", mx,
- forceMergeOpRead=true>;
+ forcePassthruRead=true>;
}
}
@@ -469,10 +469,10 @@ multiclass VPseudoVANDN {
foreach m = MxList in {
defm "" : VPseudoBinaryV_VV<m>,
SchedBinary<"WriteVIALUV", "ReadVIALUV", "ReadVIALUV", m.MX,
- forceMergeOpRead=true>;
+ forcePassthruRead=true>;
defm "" : VPseudoBinaryV_VX<m>,
SchedBinary<"WriteVIALUX", "ReadVIALUV", "ReadVIALUX", m.MX,
- forceMergeOpRead=true>;
+ forcePassthruRead=true>;
}
}
@@ -480,7 +480,7 @@ multiclass VPseudoVBREV8 {
foreach m = MxList in {
defvar mx = m.MX;
defm "" : VPseudoUnaryV_V<m>,
- SchedUnary<"WriteVBREV8V", "ReadVBREV8V", mx, forceMergeOpRead=true>;
+ SchedUnary<"WriteVBREV8V", "ReadVBREV8V", mx, forcePassthruRead=true>;
}
}
@@ -488,7 +488,7 @@ multiclass VPseudoVREV8 {
foreach m = MxList in {
defvar mx = m.MX;
defm "" : VPseudoUnaryV_V<m>,
- SchedUnary<"WriteVREV8V", "ReadVREV8V", mx, forceMergeOpRead=true>;
+ SchedUnary<"WriteVREV8V", "ReadVREV8V", mx, forcePassthruRead=true>;
}
}
@@ -496,10 +496,10 @@ multiclass VPseudoVROT_VV_VX {
foreach m = MxList in {
defm "" : VPseudoBinaryV_VV<m>,
SchedBinary<"WriteVRotV", "ReadVRotV", "ReadVRotV", m.MX,
- forceMergeOpRead=true>;
+ forcePassthruRead=true>;
defm "" : VPseudoBinaryV_VX<m>,
SchedBinary<"WriteVRotX", "ReadVRotV", "ReadVRotX", m.MX,
- forceMergeOpRead=true>;
+ forcePassthruRead=true>;
}
}
@@ -508,7 +508,7 @@ multiclass VPseudoVROT_VV_VX_VI
foreach m = MxList in {
defm "" : VPseudoBinaryV_VI<uimm6, m>,
SchedUnary<"WriteVRotI", "ReadVRotV", m.MX,
- forceMergeOpRead=true>;
+ forcePassthruRead=true>;
}
}
@@ -691,11 +691,11 @@ multiclass VPatUnaryVL_V<SDPatternOperator op, string instruction_name,
let Predicates = !listconcat([predicate],
GetVTypePredicates<vti>.Predicates) in {
def : Pat<(vti.Vector (op (vti.Vector vti.RegClass:$rs1),
- (vti.Vector vti.RegClass:$merge),
+ (vti.Vector vti.RegClass:$pt),
(vti.Mask V0),
VLOpFrag)),
(!cast<Instruction>(instruction_name#"_V_"#vti.LMul.MX#"_MASK")
- vti.RegClass:$merge,
+ vti.RegClass:$pt,
vti.RegClass:$rs1,
(vti.Mask V0),
GPR:$vl,
@@ -711,15 +711,15 @@ foreach vti = AllIntegerVectors in {
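// Note: vandn computes vs2 & ~vs1, which is why the two DAGs below -- an AND
// with (x ^ splat(-1)), and an AND with splat(~s) -- each select to a single
// vandn with the NOT-ed value as $rs1.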
def : Pat<(vti.Vector (riscv_and_vl (riscv_xor_vl
(vti.Vector vti.RegClass:$rs1),
(riscv_splat_vector -1),
- (vti.Vector vti.RegClass:$merge),
+ (vti.Vector vti.RegClass:$pt),
(vti.Mask V0),
VLOpFrag),
(vti.Vector vti.RegClass:$rs2),
- (vti.Vector vti.RegClass:$merge),
+ (vti.Vector vti.RegClass:$pt),
(vti.Mask V0),
VLOpFrag)),
(!cast<Instruction>("PseudoVANDN_VV_"#vti.LMul.MX#"_MASK")
- vti.RegClass:$merge,
+ vti.RegClass:$pt,
vti.RegClass:$rs2,
vti.RegClass:$rs1,
(vti.Mask V0),
@@ -730,11 +730,11 @@ foreach vti = AllIntegerVectors in {
def : Pat<(vti.Vector (riscv_and_vl (riscv_splat_vector
(not vti.ScalarRegClass:$rs1)),
(vti.Vector vti.RegClass:$rs2),
- (vti.Vector vti.RegClass:$merge),
+ (vti.Vector vti.RegClass:$pt),
(vti.Mask V0),
VLOpFrag)),
(!cast<Instruction>("PseudoVANDN_VX_"#vti.LMul.MX#"_MASK")
- vti.RegClass:$merge,
+ vti.RegClass:$pt,
vti.RegClass:$rs2,
vti.ScalarRegClass:$rs1,
(vti.Mask V0),
@@ -758,10 +758,10 @@ foreach vti = AllIntegerVectors in {
GetVTypePredicates<vti>.Predicates) in {
def : Pat<(riscv_rotl_vl vti.RegClass:$rs2,
(vti.Vector (SplatPat_uimm6 uimm6:$rs1)),
- (vti.Vector vti.RegClass:$merge),
+ (vti.Vector vti.RegClass:$pt),
(vti.Mask V0), VLOpFrag),
(!cast<Instruction>("PseudoVROR_VI_"#vti.LMul.MX#"_MASK")
- vti.RegClass:$merge,
+ vti.RegClass:$pt,
vti.RegClass:$rs2,
(!cast<SDNodeXForm>("InvRot" # vti.SEW # "Imm") uimm6:$rs1),
(vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
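// (There is no vrol.vi, so a rotate-left immediate is matched as vror.vi with
// the complemented amount: e.g. for SEW=64, rotl by 5 becomes vror.vi by 59.)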
@@ -778,10 +778,10 @@ foreach vtiToWti = AllWidenableIntVectors in {
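// vwsll widens: vd (2*SEW) = zext(vs2) << shift amount, so a shl whose LHS is
// a zero-extended narrow source (and whose shift amount may itself be
// extended or splatted) can match the widening form directly, as below.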
def : Pat<(riscv_shl_vl
(wti.Vector (zext_oneuse (vti.Vector vti.RegClass:$rs2))),
(wti.Vector (ext_oneuse (vti.Vector vti.RegClass:$rs1))),
- (wti.Vector wti.RegClass:$merge),
+ (wti.Vector wti.RegClass:$pt),
(vti.Mask V0), VLOpFrag),
(!cast<Instruction>("PseudoVWSLL_VV_"#vti.LMul.MX#"_MASK")
- wti.RegClass:$merge, vti.RegClass:$rs2, vti.RegClass:$rs1,
+ wti.RegClass:$pt, vti.RegClass:$rs2, vti.RegClass:$rs1,
(vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
def : Pat<(riscv_shl_vl
@@ -791,19 +791,19 @@ foreach vtiToWti = AllWidenableIntVectors in {
(wti.Vector (riscv_ext_vl_oneuse
(vti.Vector vti.RegClass:$rs1),
(vti.Mask V0), VLOpFrag)),
- (wti.Vector wti.RegClass:$merge),
+ (wti.Vector wti.RegClass:$pt),
(vti.Mask V0), VLOpFrag),
(!cast<Instruction>("PseudoVWSLL_VV_"#vti.LMul.MX#"_MASK")
- wti.RegClass:$merge, vti.RegClass:$rs2, vti.RegClass:$rs1,
+ wti.RegClass:$pt, vti.RegClass:$rs2, vti.RegClass:$rs1,
(vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
def : Pat<(riscv_shl_vl
(wti.Vector (zext_oneuse (vti.Vector vti.RegClass:$rs2))),
(wti.Vector (Low8BitsSplatPat (XLenVT GPR:$rs1))),
- (wti.Vector wti.RegClass:$merge),
+ (wti.Vector wti.RegClass:$pt),
(vti.Mask V0), VLOpFrag),
(!cast<Instruction>("PseudoVWSLL_VX_"#vti.LMul.MX#"_MASK")
- wti.RegClass:$merge, vti.RegClass:$rs2, GPR:$rs1,
+ wti.RegClass:$pt, vti.RegClass:$rs2, GPR:$rs1,
(vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
def : Pat<(riscv_shl_vl
@@ -811,19 +811,19 @@ foreach vtiToWti = AllWidenableIntVectors in {
(vti.Vector vti.RegClass:$rs2),
(vti.Mask V0), VLOpFrag)),
(wti.Vector (Low8BitsSplatPat (XLenVT GPR:$rs1))),
- (wti.Vector wti.RegClass:$merge),
+ (wti.Vector wti.RegClass:$pt),
(vti.Mask V0), VLOpFrag),
(!cast<Instruction>("PseudoVWSLL_VX_"#vti.LMul.MX#"_MASK")
- wti.RegClass:$merge, vti.RegClass:$rs2, GPR:$rs1,
+ wti.RegClass:$pt, vti.RegClass:$rs2, GPR:$rs1,
(vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
def : Pat<(riscv_shl_vl
(wti.Vector (zext_oneuse (vti.Vector vti.RegClass:$rs2))),
(wti.Vector (SplatPat_uimm5 uimm5:$rs1)),
- (wti.Vector wti.RegClass:$merge),
+ (wti.Vector wti.RegClass:$pt),
(vti.Mask V0), VLOpFrag),
(!cast<Instruction>("PseudoVWSLL_VI_"#vti.LMul.MX#"_MASK")
- wti.RegClass:$merge, vti.RegClass:$rs2, uimm5:$rs1,
+ wti.RegClass:$pt, vti.RegClass:$rs2, uimm5:$rs1,
(vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
def : Pat<(riscv_shl_vl
@@ -831,37 +831,37 @@ foreach vtiToWti = AllWidenableIntVectors in {
(vti.Vector vti.RegClass:$rs2),
(vti.Mask V0), VLOpFrag)),
(wti.Vector (SplatPat_uimm5 uimm5:$rs1)),
- (wti.Vector wti.RegClass:$merge),
+ (wti.Vector wti.RegClass:$pt),
(vti.Mask V0), VLOpFrag),
(!cast<Instruction>("PseudoVWSLL_VI_"#vti.LMul.MX#"_MASK")
- wti.RegClass:$merge, vti.RegClass:$rs2, uimm5:$rs1,
+ wti.RegClass:$pt, vti.RegClass:$rs2, uimm5:$rs1,
(vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
def : Pat<(riscv_vwsll_vl
(vti.Vector vti.RegClass:$rs2),
(vti.Vector vti.RegClass:$rs1),
- (wti.Vector wti.RegClass:$merge),
+ (wti.Vector wti.RegClass:$pt),
(vti.Mask V0), VLOpFrag),
(!cast<Instruction>("PseudoVWSLL_VV_"#vti.LMul.MX#"_MASK")
- wti.RegClass:$merge, vti.RegClass:$rs2, vti.RegClass:$rs1,
+ wti.RegClass:$pt, vti.RegClass:$rs2, vti.RegClass:$rs1,
(vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
def : Pat<(riscv_vwsll_vl
(vti.Vector vti.RegClass:$rs2),
(vti.Vector (Low8BitsSplatPat (XLenVT GPR:$rs1))),
- (wti.Vector wti.RegClass:$merge),
+ (wti.Vector wti.RegClass:$pt),
(vti.Mask V0), VLOpFrag),
(!cast<Instruction>("PseudoVWSLL_VX_"#vti.LMul.MX#"_MASK")
- wti.RegClass:$merge, vti.RegClass:$rs2, GPR:$rs1,
+ wti.RegClass:$pt, vti.RegClass:$rs2, GPR:$rs1,
(vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
def : Pat<(riscv_vwsll_vl
(vti.Vector vti.RegClass:$rs2),
(vti.Vector (SplatPat_uimm5 uimm5:$rs1)),
- (wti.Vector wti.RegClass:$merge),
+ (wti.Vector wti.RegClass:$pt),
(vti.Mask V0), VLOpFrag),
(!cast<Instruction>("PseudoVWSLL_VI_"#vti.LMul.MX#"_MASK")
- wti.RegClass:$merge, vti.RegClass:$rs2, uimm5:$rs1,
+ wti.RegClass:$pt, vti.RegClass:$rs2, uimm5:$rs1,
(vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
}
}
@@ -989,11 +989,11 @@ multiclass VPatBinaryV_VI_VROL<string intrinsic, string instruction,
!if(isSEWAware, instruction#"_VI_"#vti.LMul.MX#"_E"#vti.SEW,
instruction#"_VI_"#vti.LMul.MX));
let Predicates = GetVTypePredicates<vti>.Predicates in
- def : Pat<(vti.Vector (Intr (vti.Vector vti.RegClass:$merge),
+ def : Pat<(vti.Vector (Intr (vti.Vector vti.RegClass:$pt),
(vti.Vector vti.RegClass:$rs2),
(XLenVT uimm6:$rs1),
VLOpFrag)),
- (Pseudo (vti.Vector vti.RegClass:$merge),
+ (Pseudo (vti.Vector vti.RegClass:$pt),
(vti.Vector vti.RegClass:$rs2),
(InvRot64Imm uimm6:$rs1),
GPR:$vl, vti.Log2SEW, TU_MU)>;
@@ -1003,12 +1003,12 @@ multiclass VPatBinaryV_VI_VROL<string intrinsic, string instruction,
!if(isSEWAware, instruction#"_VI_"#vti.LMul.MX#"_E"#vti.SEW#"_MASK",
instruction#"_VI_"#vti.LMul.MX#"_MASK"));
let Predicates = GetVTypePredicates<vti>.Predicates in
- def : Pat<(vti.Vector (IntrMask (vti.Vector vti.RegClass:$merge),
+ def : Pat<(vti.Vector (IntrMask (vti.Vector vti.RegClass:$pt),
(vti.Vector vti.RegClass:$rs2),
(XLenVT uimm6:$rs1),
(vti.Mask V0),
VLOpFrag, (XLenVT timm:$policy))),
- (PseudoMask (vti.Vector vti.RegClass:$merge),
+ (PseudoMask (vti.Vector vti.RegClass:$pt),
(vti.Vector vti.RegClass:$rs2),
(InvRot64Imm uimm6:$rs1),
(vti.Mask V0),
diff --git a/llvm/lib/Target/RISCV/RISCVSchedSiFive7.td b/llvm/lib/Target/RISCV/RISCVSchedSiFive7.td
index b2991145ee65c..0b0ac0c368d07 100644
--- a/llvm/lib/Target/RISCV/RISCVSchedSiFive7.td
+++ b/llvm/lib/Target/RISCV/RISCVSchedSiFive7.td
@@ -1287,11 +1287,11 @@ def : ReadAdvance<ReadVMov8V, 0>;
// Others
def : ReadAdvance<ReadVMask, 0>;
-def : ReadAdvance<ReadVMergeOp_WorstCase, 0>;
+def : ReadAdvance<ReadVPassthru_WorstCase, 0>;
foreach mx = SchedMxList in {
- def : ReadAdvance<!cast<SchedRead>("ReadVMergeOp_" # mx), 0>;
+ def : ReadAdvance<!cast<SchedRead>("ReadVPassthru_" # mx), 0>;
foreach sew = SchedSEWSet<mx>.val in
- def : ReadAdvance<!cast<SchedRead>("ReadVMergeOp_" # mx # "_E" # sew), 0>;
+ def : ReadAdvance<!cast<SchedRead>("ReadVPassthru_" # mx # "_E" # sew), 0>;
}
//===----------------------------------------------------------------------===//
diff --git a/llvm/lib/Target/RISCV/RISCVSchedSiFiveP600.td b/llvm/lib/Target/RISCV/RISCVSchedSiFiveP600.td
index ba062f33b929a..59972d781a315 100644
--- a/llvm/lib/Target/RISCV/RISCVSchedSiFiveP600.td
+++ b/llvm/lib/Target/RISCV/RISCVSchedSiFiveP600.td
@@ -1086,11 +1086,11 @@ def : ReadAdvance<ReadVMov8V, 0>;
// Others
def : ReadAdvance<ReadVMask, 0>;
-def : ReadAdvance<ReadVMergeOp_WorstCase, 0>;
+def : ReadAdvance<ReadVPassthru_WorstCase, 0>;
foreach mx = SchedMxList in {
- def : ReadAdvance<!cast<SchedRead>("ReadVMergeOp_" # mx), 0>;
+ def : ReadAdvance<!cast<SchedRead>("ReadVPassthru_" # mx), 0>;
foreach sew = SchedSEWSet<mx>.val in
- def : ReadAdvance<!cast<SchedRead>("ReadVMergeOp_" # mx # "_E" # sew), 0>;
+ def : ReadAdvance<!cast<SchedRead>("ReadVPassthru_" # mx # "_E" # sew), 0>;
}
// Vector Crypto Extensions
diff --git a/llvm/lib/Target/RISCV/RISCVScheduleV.td b/llvm/lib/Target/RISCV/RISCVScheduleV.td
index 449611c583036..95fde1e53c805 100644
--- a/llvm/lib/Target/RISCV/RISCVScheduleV.td
+++ b/llvm/lib/Target/RISCV/RISCVScheduleV.td
@@ -766,11 +766,11 @@ def ReadVMov8V : SchedRead;
// Others
def ReadVMask : SchedRead;
-def ReadVMergeOp_WorstCase : SchedRead;
+def ReadVPassthru_WorstCase : SchedRead;
foreach mx = SchedMxList in {
- def ReadVMergeOp_ # mx : SchedRead;
+ def ReadVPassthru_ # mx : SchedRead;
foreach sew = SchedSEWSet<mx>.val in
- def ReadVMergeOp_ # mx # "_E" # sew : SchedRead;
+ def ReadVPassthru_ # mx # "_E" # sew : SchedRead;
}
//===----------------------------------------------------------------------===//
@@ -1139,11 +1139,11 @@ def : ReadAdvance<ReadVMov8V, 0>;
// Others
def : ReadAdvance<ReadVMask, 0>;
-def : ReadAdvance<ReadVMergeOp_WorstCase, 0>;
+def : ReadAdvance<ReadVPassthru_WorstCase, 0>;
foreach mx = SchedMxList in {
- def : ReadAdvance<!cast<SchedRead>("ReadVMergeOp_" # mx), 0>;
+ def : ReadAdvance<!cast<SchedRead>("ReadVPassthru_" # mx), 0>;
foreach sew = SchedSEWSet<mx>.val in
- def : ReadAdvance<!cast<SchedRead>("ReadVMergeOp_" # mx # "_E" # sew), 0>;
+ def : ReadAdvance<!cast<SchedRead>("ReadVPassthru_" # mx # "_E" # sew), 0>;
}
} // Unsupported
diff --git a/llvm/lib/Target/RISCV/RISCVVectorPeephole.cpp b/llvm/lib/Target/RISCV/RISCVVectorPeephole.cpp
index 20c014a6782a9..979677ee92332 100644
--- a/llvm/lib/Target/RISCV/RISCVVectorPeephole.cpp
+++ b/llvm/lib/Target/RISCV/RISCVVectorPeephole.cpp
@@ -254,11 +254,12 @@ bool RISCVVectorPeephole::convertVMergeToVMv(MachineInstr &MI) const {
CASE_VMERGE_TO_VMV(M8)
}
- Register MergeReg = MI.getOperand(1).getReg();
+ Register PassthruReg = MI.getOperand(1).getReg();
Register FalseReg = MI.getOperand(2).getReg();
- // Check merge == false (or merge == undef)
- if (MergeReg != RISCV::NoRegister && TRI->lookThruCopyLike(MergeReg, MRI) !=
- TRI->lookThruCopyLike(FalseReg, MRI))
+ // Check passthru == false (or passthru == undef)
+ if (PassthruReg != RISCV::NoRegister &&
+ TRI->lookThruCopyLike(PassthruReg, MRI) !=
+ TRI->lookThruCopyLike(FalseReg, MRI))
return false;
assert(MI.getOperand(4).isReg() && MI.getOperand(4).getReg() == RISCV::V0);
@@ -266,14 +267,14 @@ bool RISCVVectorPeephole::convertVMergeToVMv(MachineInstr &MI) const {
return false;
MI.setDesc(TII->get(NewOpc));
- MI.removeOperand(1); // Merge operand
+ MI.removeOperand(1); // Passthru operand
MI.tieOperands(0, 1); // Tie false to dest
MI.removeOperand(3); // Mask operand
MI.addOperand(
MachineOperand::CreateImm(RISCVII::TAIL_UNDISTURBED_MASK_UNDISTURBED));
// vmv.v.v doesn't have a mask operand, so we may be able to inflate the
- // register class for the destination and merge operands e.g. VRNoV0 -> VR
+ // register class for the destination and passthru operands, e.g. VRNoV0 -> VR
MRI->recomputeRegClass(MI.getOperand(0).getReg());
MRI->recomputeRegClass(MI.getOperand(1).getReg());
return true;
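For reference, the rewrite being renamed here turns a vmerge whose passthru
matches its false operand (or is undef) into a plain vmv.v.v. A rough sketch in
the same pseudo notation the pass's comments use (LMUL suffixes and exact
operand spellings omitted):
  %x = PseudoVMERGE_VVM %passthru, %false, %true, %mask, %vl, %sew
  ->
  %x = PseudoVMV_V_V %false, %true, %vl, %sew  ; tail undisturbed, %false tied to %x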
diff --git a/llvm/test/CodeGen/RISCV/rvv/vleff-vlseg2ff-output.ll b/llvm/test/CodeGen/RISCV/rvv/vleff-vlseg2ff-output.ll
index 390647fd9e6c6..578b5dc6a2560 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vleff-vlseg2ff-output.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vleff-vlseg2ff-output.ll
@@ -23,7 +23,7 @@ entry:
ret i64 %1
}
-define i64 @test_vleff_nxv8i8_tu(<vscale x 8 x i8> %merge, ptr %p, i64 %vl) {
+define i64 @test_vleff_nxv8i8_tu(<vscale x 8 x i8> %passthru, ptr %p, i64 %vl) {
; CHECK-LABEL: name: test_vleff_nxv8i8_tu
; CHECK: bb.0.entry:
; CHECK-NEXT: liveins: $v8, $x10, $x11
@@ -35,7 +35,7 @@ define i64 @test_vleff_nxv8i8_tu(<vscale x 8 x i8> %merge, ptr %p, i64 %vl) {
; CHECK-NEXT: $x10 = COPY [[PseudoVLE8FF_V_M1_1]]
; CHECK-NEXT: PseudoRET implicit $x10
entry:
- %0 = call { <vscale x 8 x i8>, i64 } @llvm.riscv.vleff.nxv8i8(<vscale x 8 x i8> %merge, ptr %p, i64 %vl)
+ %0 = call { <vscale x 8 x i8>, i64 } @llvm.riscv.vleff.nxv8i8(<vscale x 8 x i8> %passthru, ptr %p, i64 %vl)
%1 = extractvalue { <vscale x 8 x i8>, i64 } %0, 1
ret i64 %1
}
>From e415934702f532b54b937b53b915588136fa2e26 Mon Sep 17 00:00:00 2001
From: Luke Lau <luke at igalia.com>
Date: Wed, 24 Jul 2024 23:59:18 +0800
Subject: [PATCH 2/2] Rename $pt -> $passthru in patterns
---
.../Target/RISCV/RISCVInstrInfoVPseudos.td | 244 +++++++++---------
.../Target/RISCV/RISCVInstrInfoVVLPatterns.td | 240 ++++++++---------
llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td | 62 ++---
3 files changed, 273 insertions(+), 273 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index 5e5118affb5b3..5acab920caeea 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -777,7 +777,7 @@ class VPseudoUSLoadNoMask<VReg RetClass,
class VPseudoUSLoadMask<VReg RetClass,
int EEW> :
Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
- (ins GetVRegNoV0<RetClass>.R:$pt,
+ (ins GetVRegNoV0<RetClass>.R:$passthru,
GPRMem:$rs1,
VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
RISCVVPseudo,
@@ -785,7 +785,7 @@ class VPseudoUSLoadMask<VReg RetClass,
let mayLoad = 1;
let mayStore = 0;
let hasSideEffects = 0;
- let Constraints = "$rd = $pt";
+ let Constraints = "$rd = $passthru";
let HasVLOp = 1;
let HasSEWOp = 1;
let HasVecPolicyOp = 1;
@@ -811,7 +811,7 @@ class VPseudoUSLoadFFNoMask<VReg RetClass,
class VPseudoUSLoadFFMask<VReg RetClass,
int EEW> :
Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd, GPR:$vl),
- (ins GetVRegNoV0<RetClass>.R:$pt,
+ (ins GetVRegNoV0<RetClass>.R:$passthru,
GPRMem:$rs1,
VMaskOp:$vm, AVL:$avl, ixlenimm:$sew, ixlenimm:$policy), []>,
RISCVVPseudo,
@@ -819,7 +819,7 @@ class VPseudoUSLoadFFMask<VReg RetClass,
let mayLoad = 1;
let mayStore = 0;
let hasSideEffects = 0;
- let Constraints = "$rd = $pt";
+ let Constraints = "$rd = $passthru";
let HasVLOp = 1;
let HasSEWOp = 1;
let HasVecPolicyOp = 1;
@@ -845,7 +845,7 @@ class VPseudoSLoadNoMask<VReg RetClass,
class VPseudoSLoadMask<VReg RetClass,
int EEW> :
Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
- (ins GetVRegNoV0<RetClass>.R:$pt,
+ (ins GetVRegNoV0<RetClass>.R:$passthru,
GPRMem:$rs1, GPR:$rs2,
VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
RISCVVPseudo,
@@ -853,7 +853,7 @@ class VPseudoSLoadMask<VReg RetClass,
let mayLoad = 1;
let mayStore = 0;
let hasSideEffects = 0;
- let Constraints = "$rd = $pt";
+ let Constraints = "$rd = $passthru";
let HasVLOp = 1;
let HasSEWOp = 1;
let HasVecPolicyOp = 1;
@@ -890,7 +890,7 @@ class VPseudoILoadMask<VReg RetClass,
bit EarlyClobber,
int TargetConstraintType = 1> :
Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
- (ins GetVRegNoV0<RetClass>.R:$pt,
+ (ins GetVRegNoV0<RetClass>.R:$passthru,
GPRMem:$rs1, IdxClass:$rs2,
VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
RISCVVPseudo,
@@ -898,7 +898,7 @@ class VPseudoILoadMask<VReg RetClass,
let mayLoad = 1;
let mayStore = 0;
let hasSideEffects = 0;
- let Constraints = !if(!eq(EarlyClobber, 1), "@earlyclobber $rd, $rd = $pt", "$rd = $pt");
+ let Constraints = !if(!eq(EarlyClobber, 1), "@earlyclobber $rd, $rd = $passthru", "$rd = $passthru");
let TargetOverlapConstraintType = TargetConstraintType;
let HasVLOp = 1;
let HasSEWOp = 1;
@@ -963,13 +963,13 @@ class VPseudoSStoreMask<VReg StClass,
class VPseudoNullaryNoMask<VReg RegClass> :
Pseudo<(outs RegClass:$rd),
- (ins RegClass:$pt,
+ (ins RegClass:$passthru,
AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
RISCVVPseudo {
let mayLoad = 0;
let mayStore = 0;
let hasSideEffects = 0;
- let Constraints = "$rd = $pt";
+ let Constraints = "$rd = $passthru";
let HasVLOp = 1;
let HasSEWOp = 1;
let HasVecPolicyOp = 1;
@@ -977,13 +977,13 @@ class VPseudoNullaryNoMask<VReg RegClass> :
class VPseudoNullaryMask<VReg RegClass> :
Pseudo<(outs GetVRegNoV0<RegClass>.R:$rd),
- (ins GetVRegNoV0<RegClass>.R:$pt,
+ (ins GetVRegNoV0<RegClass>.R:$passthru,
VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
RISCVVPseudo {
let mayLoad = 0;
let mayStore = 0;
let hasSideEffects = 0;
- let Constraints ="$rd = $pt";
+ let Constraints = "$rd = $passthru";
let HasVLOp = 1;
let HasSEWOp = 1;
let UsesMaskPolicy = 1;
@@ -1012,13 +1012,13 @@ class VPseudoUnaryNoMask<DAGOperand RetClass,
string Constraint = "",
int TargetConstraintType = 1> :
Pseudo<(outs RetClass:$rd),
- (ins RetClass:$pt, OpClass:$rs2,
+ (ins RetClass:$passthru, OpClass:$rs2,
AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
RISCVVPseudo {
let mayLoad = 0;
let mayStore = 0;
let hasSideEffects = 0;
- let Constraints = !interleave([Constraint, "$rd = $pt"], ",");
+ let Constraints = !interleave([Constraint, "$rd = $passthru"], ",");
let TargetOverlapConstraintType = TargetConstraintType;
let HasVLOp = 1;
let HasSEWOp = 1;
@@ -1046,13 +1046,13 @@ class VPseudoUnaryNoMaskRoundingMode<DAGOperand RetClass,
string Constraint = "",
int TargetConstraintType = 1> :
Pseudo<(outs RetClass:$rd),
- (ins RetClass:$pt, OpClass:$rs2, ixlenimm:$rm,
+ (ins RetClass:$passthru, OpClass:$rs2, ixlenimm:$rm,
AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
RISCVVPseudo {
let mayLoad = 0;
let mayStore = 0;
let hasSideEffects = 0;
- let Constraints = !interleave([Constraint, "$rd = $pt"], ",");
+ let Constraints = !interleave([Constraint, "$rd = $passthru"], ",");
let TargetOverlapConstraintType = TargetConstraintType;
let HasVLOp = 1;
let HasSEWOp = 1;
@@ -1066,13 +1066,13 @@ class VPseudoUnaryMask<VReg RetClass,
string Constraint = "",
int TargetConstraintType = 1> :
Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
- (ins GetVRegNoV0<RetClass>.R:$pt, OpClass:$rs2,
+ (ins GetVRegNoV0<RetClass>.R:$passthru, OpClass:$rs2,
VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
RISCVVPseudo {
let mayLoad = 0;
let mayStore = 0;
let hasSideEffects = 0;
- let Constraints = !interleave([Constraint, "$rd = $pt"], ",");
+ let Constraints = !interleave([Constraint, "$rd = $passthru"], ",");
let TargetOverlapConstraintType = TargetConstraintType;
let HasVLOp = 1;
let HasSEWOp = 1;
@@ -1085,14 +1085,14 @@ class VPseudoUnaryMaskRoundingMode<VReg RetClass,
string Constraint = "",
int TargetConstraintType = 1> :
Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
- (ins GetVRegNoV0<RetClass>.R:$pt, OpClass:$rs2,
+ (ins GetVRegNoV0<RetClass>.R:$passthru, OpClass:$rs2,
VMaskOp:$vm, ixlenimm:$rm,
AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
RISCVVPseudo {
let mayLoad = 0;
let mayStore = 0;
let hasSideEffects = 0;
- let Constraints = !interleave([Constraint, "$rd = $pt"], ",");
+ let Constraints = !interleave([Constraint, "$rd = $passthru"], ",");
let TargetOverlapConstraintType = TargetConstraintType;
let HasVLOp = 1;
let HasSEWOp = 1;
@@ -1106,12 +1106,12 @@ class VPseudoUnaryMask_NoExcept<VReg RetClass,
VReg OpClass,
string Constraint = ""> :
Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
- (ins GetVRegNoV0<RetClass>.R:$pt, OpClass:$rs2,
+ (ins GetVRegNoV0<RetClass>.R:$passthru, OpClass:$rs2,
VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []> {
let mayLoad = 0;
let mayStore = 0;
let hasSideEffects = 0;
- let Constraints = !interleave([Constraint, "$rd = $pt"], ",");
+ let Constraints = !interleave([Constraint, "$rd = $passthru"], ",");
let HasVLOp = 1;
let HasSEWOp = 1;
let HasVecPolicyOp = 1;
@@ -1124,13 +1124,13 @@ class VPseudoUnaryNoMask_FRM<VReg RetClass,
string Constraint = "",
int TargetConstraintType = 1> :
Pseudo<(outs RetClass:$rd),
- (ins RetClass:$pt, OpClass:$rs2, ixlenimm:$frm,
+ (ins RetClass:$passthru, OpClass:$rs2, ixlenimm:$frm,
AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
RISCVVPseudo {
let mayLoad = 0;
let mayStore = 0;
let hasSideEffects = 0;
- let Constraints = !interleave([Constraint, "$rd = $pt"], ",");
+ let Constraints = !interleave([Constraint, "$rd = $passthru"], ",");
let TargetOverlapConstraintType = TargetConstraintType;
let HasVLOp = 1;
let HasSEWOp = 1;
@@ -1143,14 +1143,14 @@ class VPseudoUnaryMask_FRM<VReg RetClass,
string Constraint = "",
int TargetConstraintType = 1> :
Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
- (ins GetVRegNoV0<RetClass>.R:$pt, OpClass:$rs2,
+ (ins GetVRegNoV0<RetClass>.R:$passthru, OpClass:$rs2,
VMaskOp:$vm, ixlenimm:$frm,
AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
RISCVVPseudo {
let mayLoad = 0;
let mayStore = 0;
let hasSideEffects = 0;
- let Constraints = !interleave([Constraint, "$rd = $pt"], ",");
+ let Constraints = !interleave([Constraint, "$rd = $passthru"], ",");
let TargetOverlapConstraintType = TargetConstraintType;
let HasVLOp = 1;
let HasSEWOp = 1;
@@ -1185,13 +1185,13 @@ class VPseudoUnaryMaskGPROut :
class VPseudoUnaryAnyMask<VReg RetClass,
VReg Op1Class> :
Pseudo<(outs RetClass:$rd),
- (ins RetClass:$pt, Op1Class:$rs2,
+ (ins RetClass:$passthru, Op1Class:$rs2,
VR:$vm, AVL:$vl, ixlenimm:$sew), []>,
RISCVVPseudo {
let mayLoad = 0;
let mayStore = 0;
let hasSideEffects = 0;
- let Constraints = "@earlyclobber $rd, $rd = $pt";
+ let Constraints = "@earlyclobber $rd, $rd = $passthru";
let HasVLOp = 1;
let HasSEWOp = 1;
}
@@ -1219,13 +1219,13 @@ class VPseudoBinaryNoMaskPolicy<VReg RetClass,
string Constraint,
int TargetConstraintType = 1> :
Pseudo<(outs RetClass:$rd),
- (ins RetClass:$pt, Op1Class:$rs2, Op2Class:$rs1, AVL:$vl,
+ (ins RetClass:$passthru, Op1Class:$rs2, Op2Class:$rs1, AVL:$vl,
ixlenimm:$sew, ixlenimm:$policy), []>,
RISCVVPseudo {
let mayLoad = 0;
let mayStore = 0;
let hasSideEffects = 0;
- let Constraints = !interleave([Constraint, "$rd = $pt"], ",");
+ let Constraints = !interleave([Constraint, "$rd = $passthru"], ",");
let TargetOverlapConstraintType = TargetConstraintType;
let HasVLOp = 1;
let HasSEWOp = 1;
@@ -1239,12 +1239,12 @@ class VPseudoBinaryNoMaskRoundingMode<VReg RetClass,
int UsesVXRM_ = 1,
int TargetConstraintType = 1> :
Pseudo<(outs RetClass:$rd),
- (ins RetClass:$pt, Op1Class:$rs2, Op2Class:$rs1, ixlenimm:$rm,
+ (ins RetClass:$passthru, Op1Class:$rs2, Op2Class:$rs1, ixlenimm:$rm,
AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
RISCVVPseudo {
let mayLoad = 0;
let mayStore = 0;
- let Constraints = !interleave([Constraint, "$rd = $pt"], ",");
+ let Constraints = !interleave([Constraint, "$rd = $passthru"], ",");
let TargetOverlapConstraintType = TargetConstraintType;
let HasVLOp = 1;
let HasSEWOp = 1;
@@ -1260,14 +1260,14 @@ class VPseudoBinaryMaskPolicyRoundingMode<VReg RetClass,
int UsesVXRM_,
int TargetConstraintType = 1> :
Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
- (ins GetVRegNoV0<RetClass>.R:$pt,
+ (ins GetVRegNoV0<RetClass>.R:$passthru,
Op1Class:$rs2, Op2Class:$rs1,
VMaskOp:$vm, ixlenimm:$rm, AVL:$vl,
ixlenimm:$sew, ixlenimm:$policy), []>,
RISCVVPseudo {
let mayLoad = 0;
let mayStore = 0;
- let Constraints = !interleave([Constraint, "$rd = $pt"], ",");
+ let Constraints = !interleave([Constraint, "$rd = $passthru"], ",");
let TargetOverlapConstraintType = TargetConstraintType;
let HasVLOp = 1;
let HasSEWOp = 1;
@@ -1358,14 +1358,14 @@ class VPseudoBinaryMaskPolicy<VReg RetClass,
string Constraint,
int TargetConstraintType = 1> :
Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
- (ins GetVRegNoV0<RetClass>.R:$pt,
+ (ins GetVRegNoV0<RetClass>.R:$passthru,
Op1Class:$rs2, Op2Class:$rs1,
VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
RISCVVPseudo {
let mayLoad = 0;
let mayStore = 0;
let hasSideEffects = 0;
- let Constraints = !interleave([Constraint, "$rd = $pt"], ",");
+ let Constraints = !interleave([Constraint, "$rd = $passthru"], ",");
let TargetOverlapConstraintType = TargetConstraintType;
let HasVLOp = 1;
let HasSEWOp = 1;
@@ -1377,14 +1377,14 @@ class VPseudoTernaryMaskPolicy<VReg RetClass,
RegisterClass Op1Class,
DAGOperand Op2Class> :
Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
- (ins GetVRegNoV0<RetClass>.R:$pt,
+ (ins GetVRegNoV0<RetClass>.R:$passthru,
Op1Class:$rs2, Op2Class:$rs1,
VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
RISCVVPseudo {
let mayLoad = 0;
let mayStore = 0;
let hasSideEffects = 0;
- let Constraints = "$rd = $pt";
+ let Constraints = "$rd = $passthru";
let HasVLOp = 1;
let HasSEWOp = 1;
let HasVecPolicyOp = 1;
@@ -1394,7 +1394,7 @@ class VPseudoTernaryMaskPolicyRoundingMode<VReg RetClass,
RegisterClass Op1Class,
DAGOperand Op2Class> :
Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
- (ins GetVRegNoV0<RetClass>.R:$pt,
+ (ins GetVRegNoV0<RetClass>.R:$passthru,
Op1Class:$rs2, Op2Class:$rs1,
VMaskOp:$vm,
ixlenimm:$rm,
@@ -1403,7 +1403,7 @@ class VPseudoTernaryMaskPolicyRoundingMode<VReg RetClass,
let mayLoad = 0;
let mayStore = 0;
let hasSideEffects = 0;
- let Constraints = "$rd = $pt";
+ let Constraints = "$rd = $passthru";
let HasVLOp = 1;
let HasSEWOp = 1;
let HasVecPolicyOp = 1;
@@ -1418,14 +1418,14 @@ class VPseudoBinaryMOutMask<VReg RetClass,
string Constraint,
int TargetConstraintType = 1> :
Pseudo<(outs RetClass:$rd),
- (ins RetClass:$pt,
+ (ins RetClass:$passthru,
Op1Class:$rs2, Op2Class:$rs1,
VMaskOp:$vm, AVL:$vl, ixlenimm:$sew), []>,
RISCVVPseudo {
let mayLoad = 0;
let mayStore = 0;
let hasSideEffects = 0;
- let Constraints = !interleave([Constraint, "$rd = $pt"], ",");
+ let Constraints = !interleave([Constraint, "$rd = $passthru"], ",");
let TargetOverlapConstraintType = TargetConstraintType;
let HasVLOp = 1;
let HasSEWOp = 1;
@@ -1440,14 +1440,14 @@ class VPseudoTiedBinaryMask<VReg RetClass,
string Constraint,
int TargetConstraintType = 1> :
Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
- (ins GetVRegNoV0<RetClass>.R:$pt,
+ (ins GetVRegNoV0<RetClass>.R:$passthru,
Op2Class:$rs1,
VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
RISCVVPseudo {
let mayLoad = 0;
let mayStore = 0;
let hasSideEffects = 0;
- let Constraints = !interleave([Constraint, "$rd = $pt"], ",");
+ let Constraints = !interleave([Constraint, "$rd = $passthru"], ",");
let TargetOverlapConstraintType = TargetConstraintType;
let HasVLOp = 1;
let HasSEWOp = 1;
@@ -1461,7 +1461,7 @@ class VPseudoTiedBinaryMaskRoundingMode<VReg RetClass,
string Constraint,
int TargetConstraintType = 1> :
Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
- (ins GetVRegNoV0<RetClass>.R:$pt,
+ (ins GetVRegNoV0<RetClass>.R:$passthru,
Op2Class:$rs1,
VMaskOp:$vm,
ixlenimm:$rm,
@@ -1470,7 +1470,7 @@ class VPseudoTiedBinaryMaskRoundingMode<VReg RetClass,
let mayLoad = 0;
let mayStore = 0;
let hasSideEffects = 0;
- let Constraints = !interleave([Constraint, "$rd = $pt"], ",");
+ let Constraints = !interleave([Constraint, "$rd = $passthru"], ",");
let TargetOverlapConstraintType = TargetConstraintType;
let HasVLOp = 1;
let HasSEWOp = 1;
@@ -1511,13 +1511,13 @@ class VPseudoTiedBinaryCarryIn<VReg RetClass,
LMULInfo MInfo,
int TargetConstraintType = 1> :
Pseudo<(outs RetClass:$rd),
- (ins RetClass:$pt, Op1Class:$rs2, Op2Class:$rs1,
+ (ins RetClass:$passthru, Op1Class:$rs2, Op2Class:$rs1,
VMV0:$carry, AVL:$vl, ixlenimm:$sew), []>,
RISCVVPseudo {
let mayLoad = 0;
let mayStore = 0;
let hasSideEffects = 0;
- let Constraints = "$rd = $pt";
+ let Constraints = "$rd = $passthru";
let TargetOverlapConstraintType = TargetConstraintType;
let HasVLOp = 1;
let HasSEWOp = 1;
@@ -1602,14 +1602,14 @@ class VPseudoUSSegLoadMask<VReg RetClass,
int EEW,
bits<4> NF> :
Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
- (ins GetVRegNoV0<RetClass>.R:$pt, GPRMem:$rs1,
+ (ins GetVRegNoV0<RetClass>.R:$passthru, GPRMem:$rs1,
VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
RISCVVPseudo,
RISCVVLSEG<NF, /*Masked*/1, /*Strided*/0, /*FF*/0, !logtwo(EEW), VLMul> {
let mayLoad = 1;
let mayStore = 0;
let hasSideEffects = 0;
- let Constraints = "$rd = $pt";
+ let Constraints = "$rd = $passthru";
let HasVLOp = 1;
let HasSEWOp = 1;
let HasVecPolicyOp = 1;
@@ -1637,14 +1637,14 @@ class VPseudoUSSegLoadFFMask<VReg RetClass,
int EEW,
bits<4> NF> :
Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd, GPR:$vl),
- (ins GetVRegNoV0<RetClass>.R:$pt, GPRMem:$rs1,
+ (ins GetVRegNoV0<RetClass>.R:$passthru, GPRMem:$rs1,
VMaskOp:$vm, AVL:$avl, ixlenimm:$sew, ixlenimm:$policy), []>,
RISCVVPseudo,
RISCVVLSEG<NF, /*Masked*/1, /*Strided*/0, /*FF*/1, !logtwo(EEW), VLMul> {
let mayLoad = 1;
let mayStore = 0;
let hasSideEffects = 0;
- let Constraints = "$rd = $pt";
+ let Constraints = "$rd = $passthru";
let HasVLOp = 1;
let HasSEWOp = 1;
let HasVecPolicyOp = 1;
@@ -1655,7 +1655,7 @@ class VPseudoSSegLoadNoMask<VReg RetClass,
int EEW,
bits<4> NF> :
Pseudo<(outs RetClass:$rd),
- (ins RetClass:$pt, GPRMem:$rs1, GPR:$offset, AVL:$vl,
+ (ins RetClass:$passthru, GPRMem:$rs1, GPR:$offset, AVL:$vl,
ixlenimm:$sew, ixlenimm:$policy), []>,
RISCVVPseudo,
RISCVVLSEG<NF, /*Masked*/0, /*Strided*/1, /*FF*/0, !logtwo(EEW), VLMul> {
@@ -1665,14 +1665,14 @@ class VPseudoSSegLoadNoMask<VReg RetClass,
let HasVLOp = 1;
let HasSEWOp = 1;
let HasVecPolicyOp = 1;
- let Constraints = "$rd = $pt";
+ let Constraints = "$rd = $passthru";
}
class VPseudoSSegLoadMask<VReg RetClass,
int EEW,
bits<4> NF> :
Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
- (ins GetVRegNoV0<RetClass>.R:$pt, GPRMem:$rs1,
+ (ins GetVRegNoV0<RetClass>.R:$passthru, GPRMem:$rs1,
GPR:$offset, VMaskOp:$vm, AVL:$vl, ixlenimm:$sew,
ixlenimm:$policy), []>,
RISCVVPseudo,
@@ -1680,7 +1680,7 @@ class VPseudoSSegLoadMask<VReg RetClass,
let mayLoad = 1;
let mayStore = 0;
let hasSideEffects = 0;
- let Constraints = "$rd = $pt";
+ let Constraints = "$rd = $passthru";
let HasVLOp = 1;
let HasSEWOp = 1;
let HasVecPolicyOp = 1;
@@ -1694,7 +1694,7 @@ class VPseudoISegLoadNoMask<VReg RetClass,
bits<4> NF,
bit Ordered> :
Pseudo<(outs RetClass:$rd),
- (ins RetClass:$pt, GPRMem:$rs1, IdxClass:$offset, AVL:$vl,
+ (ins RetClass:$passthru, GPRMem:$rs1, IdxClass:$offset, AVL:$vl,
ixlenimm:$sew, ixlenimm:$policy), []>,
RISCVVPseudo,
RISCVVLXSEG<NF, /*Masked*/0, Ordered, !logtwo(EEW), VLMul, LMUL> {
@@ -1703,7 +1703,7 @@ class VPseudoISegLoadNoMask<VReg RetClass,
let hasSideEffects = 0;
// For vector indexed segment loads, the destination vector register groups
// cannot overlap the source vector register group
- let Constraints = "@earlyclobber $rd, $rd = $pt";
+ let Constraints = "@earlyclobber $rd, $rd = $passthru";
let HasVLOp = 1;
let HasSEWOp = 1;
let HasVecPolicyOp = 1;
@@ -1716,7 +1716,7 @@ class VPseudoISegLoadMask<VReg RetClass,
bits<4> NF,
bit Ordered> :
Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
- (ins GetVRegNoV0<RetClass>.R:$pt, GPRMem:$rs1,
+ (ins GetVRegNoV0<RetClass>.R:$passthru, GPRMem:$rs1,
IdxClass:$offset, VMaskOp:$vm, AVL:$vl, ixlenimm:$sew,
ixlenimm:$policy), []>,
RISCVVPseudo,
@@ -1726,7 +1726,7 @@ class VPseudoISegLoadMask<VReg RetClass,
let hasSideEffects = 0;
// For vector indexed segment loads, the destination vector register groups
// cannot overlap the source vector register group
- let Constraints = "@earlyclobber $rd, $rd = $pt";
+ let Constraints = "@earlyclobber $rd, $rd = $passthru";
let HasVLOp = 1;
let HasSEWOp = 1;
let HasVecPolicyOp = 1;
@@ -3922,14 +3922,14 @@ class VPatUnaryNoMask<string intrinsic_name,
VReg op2_reg_class,
bit isSEWAware = 0> :
Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
- (result_type result_reg_class:$pt),
+ (result_type result_reg_class:$passthru),
(op2_type op2_reg_class:$rs2),
VLOpFrag)),
(!cast<Instruction>(
!if(isSEWAware,
inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, log2sew),
inst#"_"#kind#"_"#vlmul.MX))
- (result_type result_reg_class:$pt),
+ (result_type result_reg_class:$passthru),
(op2_type op2_reg_class:$rs2),
GPR:$vl, log2sew, TU_MU)>;
@@ -3944,7 +3944,7 @@ class VPatUnaryNoMaskRoundingMode<string intrinsic_name,
VReg op2_reg_class,
bit isSEWAware = 0> :
Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
- (result_type result_reg_class:$pt),
+ (result_type result_reg_class:$passthru),
(op2_type op2_reg_class:$rs2),
(XLenVT timm:$round),
VLOpFrag)),
@@ -3952,7 +3952,7 @@ class VPatUnaryNoMaskRoundingMode<string intrinsic_name,
!if(isSEWAware,
inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, log2sew),
inst#"_"#kind#"_"#vlmul.MX))
- (result_type result_reg_class:$pt),
+ (result_type result_reg_class:$passthru),
(op2_type op2_reg_class:$rs2),
(XLenVT timm:$round),
GPR:$vl, log2sew, TU_MU)>;
@@ -3968,7 +3968,7 @@ class VPatUnaryNoMaskRTZ<string intrinsic_name,
VReg op2_reg_class,
bit isSEWAware = 0> :
Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
- (result_type result_reg_class:$pt),
+ (result_type result_reg_class:$passthru),
(op2_type op2_reg_class:$rs2),
(XLenVT 0b001),
VLOpFrag)),
@@ -3976,7 +3976,7 @@ class VPatUnaryNoMaskRTZ<string intrinsic_name,
!if(isSEWAware,
inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, log2sew),
inst#"_"#kind#"_"#vlmul.MX))
- (result_type result_reg_class:$pt),
+ (result_type result_reg_class:$passthru),
(op2_type op2_reg_class:$rs2),
GPR:$vl, log2sew, TU_MU)>;
@@ -3992,7 +3992,7 @@ class VPatUnaryMask<string intrinsic_name,
VReg op2_reg_class,
bit isSEWAware = 0> :
Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
- (result_type result_reg_class:$pt),
+ (result_type result_reg_class:$passthru),
(op2_type op2_reg_class:$rs2),
(mask_type V0),
VLOpFrag, (XLenVT timm:$policy))),
@@ -4000,7 +4000,7 @@ class VPatUnaryMask<string intrinsic_name,
!if(isSEWAware,
inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK",
inst#"_"#kind#"_"#vlmul.MX#"_MASK"))
- (result_type result_reg_class:$pt),
+ (result_type result_reg_class:$passthru),
(op2_type op2_reg_class:$rs2),
(mask_type V0), GPR:$vl, log2sew, (XLenVT timm:$policy))>;
@@ -4016,7 +4016,7 @@ class VPatUnaryMaskRoundingMode<string intrinsic_name,
VReg op2_reg_class,
bit isSEWAware = 0> :
Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
- (result_type result_reg_class:$pt),
+ (result_type result_reg_class:$passthru),
(op2_type op2_reg_class:$rs2),
(mask_type V0),
(XLenVT timm:$round),
@@ -4025,7 +4025,7 @@ class VPatUnaryMaskRoundingMode<string intrinsic_name,
!if(isSEWAware,
inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK",
inst#"_"#kind#"_"#vlmul.MX#"_MASK"))
- (result_type result_reg_class:$pt),
+ (result_type result_reg_class:$passthru),
(op2_type op2_reg_class:$rs2),
(mask_type V0),
(XLenVT timm:$round),
@@ -4043,7 +4043,7 @@ class VPatUnaryMaskRTZ<string intrinsic_name,
VReg op2_reg_class,
bit isSEWAware = 0> :
Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
- (result_type result_reg_class:$pt),
+ (result_type result_reg_class:$passthru),
(op2_type op2_reg_class:$rs2),
(mask_type V0),
(XLenVT 0b001),
@@ -4052,7 +4052,7 @@ class VPatUnaryMaskRTZ<string intrinsic_name,
!if(isSEWAware,
inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK",
inst#"_"#kind#"_"#vlmul.MX#"_MASK"))
- (result_type result_reg_class:$pt),
+ (result_type result_reg_class:$passthru),
(op2_type op2_reg_class:$rs2),
(mask_type V0),
GPR:$vl, log2sew, (XLenVT timm:$policy))>;
@@ -4071,12 +4071,12 @@ class VPatMaskUnaryMask<string intrinsic_name,
string inst,
MTypeInfo mti> :
Pat<(mti.Mask (!cast<Intrinsic>(intrinsic_name#"_mask")
- (mti.Mask VR:$pt),
+ (mti.Mask VR:$passthru),
(mti.Mask VR:$rs2),
(mti.Mask V0),
VLOpFrag)),
(!cast<Instruction>(inst#"_M_"#mti.BX#"_MASK")
- (mti.Mask VR:$pt),
+ (mti.Mask VR:$passthru),
(mti.Mask VR:$rs2),
(mti.Mask V0), GPR:$vl, mti.Log2SEW, TU_MU)>;
@@ -4091,12 +4091,12 @@ class VPatUnaryAnyMask<string intrinsic,
VReg result_reg_class,
VReg op1_reg_class> :
Pat<(result_type (!cast<Intrinsic>(intrinsic)
- (result_type result_reg_class:$pt),
+ (result_type result_reg_class:$passthru),
(op1_type op1_reg_class:$rs1),
(mask_type VR:$rs2),
VLOpFrag)),
(!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, log2sew))
- (result_type result_reg_class:$pt),
+ (result_type result_reg_class:$passthru),
(op1_type op1_reg_class:$rs1),
(mask_type VR:$rs2),
GPR:$vl, log2sew)>;
@@ -4128,12 +4128,12 @@ class VPatBinaryNoMaskTU<string intrinsic_name,
VReg op1_reg_class,
DAGOperand op2_kind> :
Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
- (result_type result_reg_class:$pt),
+ (result_type result_reg_class:$passthru),
(op1_type op1_reg_class:$rs1),
(op2_type op2_kind:$rs2),
VLOpFrag)),
(!cast<Instruction>(inst)
- (result_type result_reg_class:$pt),
+ (result_type result_reg_class:$passthru),
(op1_type op1_reg_class:$rs1),
(op2_type op2_kind:$rs2),
GPR:$vl, sew, TU_MU)>;
@@ -4148,13 +4148,13 @@ class VPatBinaryNoMaskTURoundingMode<string intrinsic_name,
VReg op1_reg_class,
DAGOperand op2_kind> :
Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
- (result_type result_reg_class:$pt),
+ (result_type result_reg_class:$passthru),
(op1_type op1_reg_class:$rs1),
(op2_type op2_kind:$rs2),
(XLenVT timm:$round),
VLOpFrag)),
(!cast<Instruction>(inst)
- (result_type result_reg_class:$pt),
+ (result_type result_reg_class:$passthru),
(op1_type op1_reg_class:$rs1),
(op2_type op2_kind:$rs2),
(XLenVT timm:$round),
@@ -4190,13 +4190,13 @@ class VPatBinaryMask<string intrinsic_name,
VReg op1_reg_class,
DAGOperand op2_kind> :
Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
- (result_type result_reg_class:$pt),
+ (result_type result_reg_class:$passthru),
(op1_type op1_reg_class:$rs1),
(op2_type op2_kind:$rs2),
(mask_type V0),
VLOpFrag)),
(!cast<Instruction>(inst#"_MASK")
- (result_type result_reg_class:$pt),
+ (result_type result_reg_class:$passthru),
(op1_type op1_reg_class:$rs1),
(op2_type op2_kind:$rs2),
(mask_type V0), GPR:$vl, sew)>;
@@ -4212,13 +4212,13 @@ class VPatBinaryMaskPolicy<string intrinsic_name,
VReg op1_reg_class,
DAGOperand op2_kind> :
Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
- (result_type result_reg_class:$pt),
+ (result_type result_reg_class:$passthru),
(op1_type op1_reg_class:$rs1),
(op2_type op2_kind:$rs2),
(mask_type V0),
VLOpFrag, (XLenVT timm:$policy))),
(!cast<Instruction>(inst#"_MASK")
- (result_type result_reg_class:$pt),
+ (result_type result_reg_class:$passthru),
(op1_type op1_reg_class:$rs1),
(op2_type op2_kind:$rs2),
(mask_type V0), GPR:$vl, sew, (XLenVT timm:$policy))>;
@@ -4234,14 +4234,14 @@ class VPatBinaryMaskPolicyRoundingMode<string intrinsic_name,
VReg op1_reg_class,
DAGOperand op2_kind> :
Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
- (result_type result_reg_class:$pt),
+ (result_type result_reg_class:$passthru),
(op1_type op1_reg_class:$rs1),
(op2_type op2_kind:$rs2),
(mask_type V0),
(XLenVT timm:$round),
VLOpFrag, (XLenVT timm:$policy))),
(!cast<Instruction>(inst#"_MASK")
- (result_type result_reg_class:$pt),
+ (result_type result_reg_class:$passthru),
(op1_type op1_reg_class:$rs1),
(op2_type op2_kind:$rs2),
(mask_type V0),
@@ -4260,13 +4260,13 @@ class VPatBinaryMaskSwapped<string intrinsic_name,
VReg op1_reg_class,
DAGOperand op2_kind> :
Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
- (result_type result_reg_class:$pt),
+ (result_type result_reg_class:$passthru),
(op2_type op2_kind:$rs2),
(op1_type op1_reg_class:$rs1),
(mask_type V0),
VLOpFrag)),
(!cast<Instruction>(inst#"_MASK")
- (result_type result_reg_class:$pt),
+ (result_type result_reg_class:$passthru),
(op1_type op1_reg_class:$rs1),
(op2_type op2_kind:$rs2),
(mask_type V0), GPR:$vl, sew)>;
@@ -4315,12 +4315,12 @@ class VPatTiedBinaryNoMaskTU<string intrinsic_name,
VReg result_reg_class,
DAGOperand op2_kind> :
Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
- (result_type result_reg_class:$pt),
- (result_type result_reg_class:$pt),
+ (result_type result_reg_class:$passthru),
+ (result_type result_reg_class:$passthru),
(op2_type op2_kind:$rs2),
VLOpFrag)),
(!cast<Instruction>(inst#"_TIED")
- (result_type result_reg_class:$pt),
+ (result_type result_reg_class:$passthru),
(op2_type op2_kind:$rs2),
GPR:$vl, sew, TU_MU)>;
@@ -4332,13 +4332,13 @@ class VPatTiedBinaryNoMaskTURoundingMode<string intrinsic_name,
VReg result_reg_class,
DAGOperand op2_kind> :
Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
- (result_type result_reg_class:$pt),
- (result_type result_reg_class:$pt),
+ (result_type result_reg_class:$passthru),
+ (result_type result_reg_class:$passthru),
(op2_type op2_kind:$rs2),
(XLenVT timm:$round),
VLOpFrag)),
(!cast<Instruction>(inst#"_TIED")
- (result_type result_reg_class:$pt),
+ (result_type result_reg_class:$passthru),
(op2_type op2_kind:$rs2),
(XLenVT timm:$round),
GPR:$vl, sew, TU_MU)>;
@@ -4352,13 +4352,13 @@ class VPatTiedBinaryMask<string intrinsic_name,
VReg result_reg_class,
DAGOperand op2_kind> :
Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
- (result_type result_reg_class:$pt),
- (result_type result_reg_class:$pt),
+ (result_type result_reg_class:$passthru),
+ (result_type result_reg_class:$passthru),
(op2_type op2_kind:$rs2),
(mask_type V0),
VLOpFrag, (XLenVT timm:$policy))),
(!cast<Instruction>(inst#"_MASK_TIED")
- (result_type result_reg_class:$pt),
+ (result_type result_reg_class:$passthru),
(op2_type op2_kind:$rs2),
(mask_type V0), GPR:$vl, sew, (XLenVT timm:$policy))>;
@@ -4371,14 +4371,14 @@ class VPatTiedBinaryMaskRoundingMode<string intrinsic_name,
VReg result_reg_class,
DAGOperand op2_kind> :
Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
- (result_type result_reg_class:$pt),
- (result_type result_reg_class:$pt),
+ (result_type result_reg_class:$passthru),
+ (result_type result_reg_class:$passthru),
(op2_type op2_kind:$rs2),
(mask_type V0),
(XLenVT timm:$round),
VLOpFrag, (XLenVT timm:$policy))),
(!cast<Instruction>(inst#"_MASK_TIED")
- (result_type result_reg_class:$pt),
+ (result_type result_reg_class:$passthru),
(op2_type op2_kind:$rs2),
(mask_type V0),
(XLenVT timm:$round),
@@ -4678,15 +4678,15 @@ multiclass VPatNullaryV<string intrinsic, string instruction> {
foreach vti = AllIntegerVectors in {
let Predicates = GetVTypePredicates<vti>.Predicates in {
def : Pat<(vti.Vector (!cast<Intrinsic>(intrinsic)
- (vti.Vector vti.RegClass:$pt),
+ (vti.Vector vti.RegClass:$passthru),
VLOpFrag)),
(!cast<Instruction>(instruction#"_V_" # vti.LMul.MX)
- vti.RegClass:$pt, GPR:$vl, vti.Log2SEW, TU_MU)>;
+ vti.RegClass:$passthru, GPR:$vl, vti.Log2SEW, TU_MU)>;
def : Pat<(vti.Vector (!cast<Intrinsic>(intrinsic # "_mask")
- (vti.Vector vti.RegClass:$pt),
+ (vti.Vector vti.RegClass:$passthru),
(vti.Mask V0), VLOpFrag, (XLenVT timm:$policy))),
(!cast<Instruction>(instruction#"_V_" # vti.LMul.MX # "_MASK")
- vti.RegClass:$pt, (vti.Mask V0),
+ vti.RegClass:$passthru, (vti.Mask V0),
GPR:$vl, vti.Log2SEW, (XLenVT timm:$policy))>;
}
}
@@ -4781,13 +4781,13 @@ multiclass VPatBinaryCarryInTAIL<string intrinsic,
VReg op1_reg_class,
DAGOperand op2_kind> {
def : Pat<(result_type (!cast<Intrinsic>(intrinsic)
- (result_type result_reg_class:$pt),
+ (result_type result_reg_class:$passthru),
(op1_type op1_reg_class:$rs1),
(op2_type op2_kind:$rs2),
(mask_type V0),
VLOpFrag)),
(!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
- (result_type result_reg_class:$pt),
+ (result_type result_reg_class:$passthru),
(op1_type op1_reg_class:$rs1),
(op2_type op2_kind:$rs2),
(mask_type V0), GPR:$vl, sew)>;
@@ -6065,12 +6065,12 @@ multiclass VPatCompare_VI<string intrinsic, string inst,
defvar IntrMask = !cast<Intrinsic>(intrinsic # "_mask");
defvar PseudoMask = !cast<Instruction>(inst#"_VI_"#vti.LMul.MX#"_MASK");
let Predicates = GetVTypePredicates<vti>.Predicates in
- def : Pat<(vti.Mask (IntrMask (vti.Mask VR:$pt),
+ def : Pat<(vti.Mask (IntrMask (vti.Mask VR:$passthru),
(vti.Vector vti.RegClass:$rs1),
(vti.Scalar ImmType:$rs2),
(vti.Mask V0),
VLOpFrag)),
- (PseudoMask VR:$pt, vti.RegClass:$rs1, (DecImm ImmType:$rs2),
+ (PseudoMask VR:$passthru, vti.RegClass:$rs1, (DecImm ImmType:$rs2),
(vti.Mask V0), GPR:$vl, vti.Log2SEW)>;
}
}
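// (Used for comparisons with no immediate form: e.g. a vmslt intrinsic with
// immediate i is emitted as vmsle.vi with i-1, hence the DecImm above.)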
@@ -6215,24 +6215,24 @@ foreach vti = AllIntegerVectors in {
// to use a more complex splat sequence. Add the pattern for all VTs for
// consistency.
let Predicates = GetVTypePredicates<vti>.Predicates in {
- def : Pat<(vti.Vector (int_riscv_vrsub (vti.Vector vti.RegClass:$pt),
+ def : Pat<(vti.Vector (int_riscv_vrsub (vti.Vector vti.RegClass:$passthru),
(vti.Vector vti.RegClass:$rs2),
(vti.Vector vti.RegClass:$rs1),
VLOpFrag)),
(!cast<Instruction>("PseudoVSUB_VV_"#vti.LMul.MX)
- vti.RegClass:$pt,
+ vti.RegClass:$passthru,
vti.RegClass:$rs1,
vti.RegClass:$rs2,
GPR:$vl,
vti.Log2SEW, TU_MU)>;
- def : Pat<(vti.Vector (int_riscv_vrsub_mask (vti.Vector vti.RegClass:$pt),
+ def : Pat<(vti.Vector (int_riscv_vrsub_mask (vti.Vector vti.RegClass:$passthru),
(vti.Vector vti.RegClass:$rs2),
(vti.Vector vti.RegClass:$rs1),
(vti.Mask V0),
VLOpFrag,
(XLenVT timm:$policy))),
(!cast<Instruction>("PseudoVSUB_VV_"#vti.LMul.MX#"_MASK")
- vti.RegClass:$pt,
+ vti.RegClass:$passthru,
vti.RegClass:$rs1,
vti.RegClass:$rs2,
(vti.Mask V0),
@@ -6241,24 +6241,24 @@ foreach vti = AllIntegerVectors in {
(XLenVT timm:$policy))>;
// Match VSUB with a small immediate to vadd.vi by negating the immediate.
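// (There is no vsub.vi, so e.g. a vsub of 3 becomes vadd.vi with -3.)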
- def : Pat<(vti.Vector (int_riscv_vsub (vti.Vector vti.RegClass:$pt),
+ def : Pat<(vti.Vector (int_riscv_vsub (vti.Vector vti.RegClass:$passthru),
(vti.Vector vti.RegClass:$rs1),
(vti.Scalar simm5_plus1:$rs2),
VLOpFrag)),
(!cast<Instruction>("PseudoVADD_VI_"#vti.LMul.MX)
- vti.RegClass:$pt,
+ vti.RegClass:$passthru,
vti.RegClass:$rs1,
(NegImm simm5_plus1:$rs2),
GPR:$vl,
vti.Log2SEW, TU_MU)>;
- def : Pat<(vti.Vector (int_riscv_vsub_mask (vti.Vector vti.RegClass:$pt),
+ def : Pat<(vti.Vector (int_riscv_vsub_mask (vti.Vector vti.RegClass:$passthru),
(vti.Vector vti.RegClass:$rs1),
(vti.Scalar simm5_plus1:$rs2),
(vti.Mask V0),
VLOpFrag,
(XLenVT timm:$policy))),
(!cast<Instruction>("PseudoVADD_VI_"#vti.LMul.MX#"_MASK")
- vti.RegClass:$pt,
+ vti.RegClass:$passthru,
vti.RegClass:$rs1,
(NegImm simm5_plus1:$rs2),
(vti.Mask V0),
@@ -6907,20 +6907,20 @@ defm : VPatBinaryV_VV_VX_VI<"int_riscv_vsra", "PseudoVSRA", AllIntegerVectors,
foreach vti = AllIntegerVectors in {
// Emit shift by 1 as an add since it might be faster.
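// (vsll by 1 is x << 1 == x + x, hence $rs1 appearing twice in the VADD below.)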
let Predicates = GetVTypePredicates<vti>.Predicates in {
- def : Pat<(vti.Vector (int_riscv_vsll (vti.Vector vti.RegClass:$pt),
+ def : Pat<(vti.Vector (int_riscv_vsll (vti.Vector vti.RegClass:$passthru),
(vti.Vector vti.RegClass:$rs1),
(XLenVT 1), VLOpFrag)),
(!cast<Instruction>("PseudoVADD_VV_"#vti.LMul.MX)
- vti.RegClass:$pt, vti.RegClass:$rs1,
+ vti.RegClass:$passthru, vti.RegClass:$rs1,
vti.RegClass:$rs1, GPR:$vl, vti.Log2SEW, TU_MU)>;
- def : Pat<(vti.Vector (int_riscv_vsll_mask (vti.Vector vti.RegClass:$pt),
+ def : Pat<(vti.Vector (int_riscv_vsll_mask (vti.Vector vti.RegClass:$passthru),
(vti.Vector vti.RegClass:$rs1),
(XLenVT 1),
(vti.Mask V0),
VLOpFrag,
(XLenVT timm:$policy))),
(!cast<Instruction>("PseudoVADD_VV_"#vti.LMul.MX#"_MASK")
- vti.RegClass:$pt,
+ vti.RegClass:$passthru,
vti.RegClass:$rs1,
vti.RegClass:$rs1,
(vti.Mask V0),
@@ -7258,11 +7258,11 @@ foreach vti = AllFloatVectors in {
foreach fvti = AllFloatVectors in {
defvar instr = !cast<Instruction>("PseudoVMERGE_VIM_"#fvti.LMul.MX);
let Predicates = GetVTypePredicates<fvti>.Predicates in
- def : Pat<(fvti.Vector (int_riscv_vfmerge (fvti.Vector fvti.RegClass:$pt),
+ def : Pat<(fvti.Vector (int_riscv_vfmerge (fvti.Vector fvti.RegClass:$passthru),
(fvti.Vector fvti.RegClass:$rs2),
(fvti.Scalar (fpimm0)),
(fvti.Mask V0), VLOpFrag)),
- (instr fvti.RegClass:$pt, fvti.RegClass:$rs2, 0,
+ (instr fvti.RegClass:$passthru, fvti.RegClass:$rs2, 0,
(fvti.Mask V0), GPR:$vl, fvti.Log2SEW)>;
}
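// (As with the vfmv.s.f-of-zero patterns earlier, +0.0 is all-zero bits, so a
// zero-scalar vfmerge can reuse the integer vmerge.vim with immediate 0.)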
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
index ad585d3a46fee..7bf4ad2312dea 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
@@ -149,18 +149,18 @@ def riscv_strict_fmul_vl : SDNode<"RISCVISD::STRICT_FMUL_VL", SDT_RISCVFPBinOp
def riscv_strict_fdiv_vl : SDNode<"RISCVISD::STRICT_FDIV_VL", SDT_RISCVFPBinOp_VL, [SDNPHasChain]>;
def riscv_strict_fsqrt_vl : SDNode<"RISCVISD::STRICT_FSQRT_VL", SDT_RISCVFPUnOp_VL, [SDNPHasChain]>;
-def any_riscv_fadd_vl : PatFrags<(ops node:$lhs, node:$rhs, node:$pt, node:$mask, node:$vl),
- [(riscv_fadd_vl node:$lhs, node:$rhs, node:$pt, node:$mask, node:$vl),
- (riscv_strict_fadd_vl node:$lhs, node:$rhs, node:$pt, node:$mask, node:$vl)]>;
-def any_riscv_fsub_vl : PatFrags<(ops node:$lhs, node:$rhs, node:$pt, node:$mask, node:$vl),
- [(riscv_fsub_vl node:$lhs, node:$rhs, node:$pt, node:$mask, node:$vl),
- (riscv_strict_fsub_vl node:$lhs, node:$rhs, node:$pt, node:$mask, node:$vl)]>;
-def any_riscv_fmul_vl : PatFrags<(ops node:$lhs, node:$rhs, node:$pt, node:$mask, node:$vl),
- [(riscv_fmul_vl node:$lhs, node:$rhs, node:$pt, node:$mask, node:$vl),
- (riscv_strict_fmul_vl node:$lhs, node:$rhs, node:$pt, node:$mask, node:$vl)]>;
-def any_riscv_fdiv_vl : PatFrags<(ops node:$lhs, node:$rhs, node:$pt, node:$mask, node:$vl),
- [(riscv_fdiv_vl node:$lhs, node:$rhs, node:$pt, node:$mask, node:$vl),
- (riscv_strict_fdiv_vl node:$lhs, node:$rhs, node:$pt, node:$mask, node:$vl)]>;
+def any_riscv_fadd_vl : PatFrags<(ops node:$lhs, node:$rhs, node:$passthru, node:$mask, node:$vl),
+ [(riscv_fadd_vl node:$lhs, node:$rhs, node:$passthru, node:$mask, node:$vl),
+ (riscv_strict_fadd_vl node:$lhs, node:$rhs, node:$passthru, node:$mask, node:$vl)]>;
+def any_riscv_fsub_vl : PatFrags<(ops node:$lhs, node:$rhs, node:$passthru, node:$mask, node:$vl),
+ [(riscv_fsub_vl node:$lhs, node:$rhs, node:$passthru, node:$mask, node:$vl),
+ (riscv_strict_fsub_vl node:$lhs, node:$rhs, node:$passthru, node:$mask, node:$vl)]>;
+def any_riscv_fmul_vl : PatFrags<(ops node:$lhs, node:$rhs, node:$passthru, node:$mask, node:$vl),
+ [(riscv_fmul_vl node:$lhs, node:$rhs, node:$passthru, node:$mask, node:$vl),
+ (riscv_strict_fmul_vl node:$lhs, node:$rhs, node:$passthru, node:$mask, node:$vl)]>;
+def any_riscv_fdiv_vl : PatFrags<(ops node:$lhs, node:$rhs, node:$passthru, node:$mask, node:$vl),
+ [(riscv_fdiv_vl node:$lhs, node:$rhs, node:$passthru, node:$mask, node:$vl),
+ (riscv_strict_fdiv_vl node:$lhs, node:$rhs, node:$passthru, node:$mask, node:$vl)]>;
def any_riscv_fsqrt_vl : PatFrags<(ops node:$src, node:$mask, node:$vl),
[(riscv_fsqrt_vl node:$src, node:$mask, node:$vl),
(riscv_strict_fsqrt_vl node:$src, node:$mask, node:$vl)]>;
@@ -318,12 +318,12 @@ def any_riscv_vfround_noexcept_vl : PatFrags<(ops node:$src, node:$mask, node:$v
def riscv_setcc_vl : SDNode<"RISCVISD::SETCC_VL", SDT_RISCVSETCCOP_VL>;
def riscv_strict_fsetcc_vl : SDNode<"RISCVISD::STRICT_FSETCC_VL", SDT_RISCVSETCCOP_VL, [SDNPHasChain]>;
def riscv_strict_fsetccs_vl : SDNode<"RISCVISD::STRICT_FSETCCS_VL", SDT_RISCVSETCCOP_VL, [SDNPHasChain]>;
-def any_riscv_fsetcc_vl : PatFrags<(ops node:$lhs, node:$rhs, node:$cc, node:$pt, node:$mask, node:$vl),
- [(riscv_setcc_vl node:$lhs, node:$rhs, node:$cc, node:$pt, node:$mask, node:$vl),
- (riscv_strict_fsetcc_vl node:$lhs, node:$rhs, node:$cc, node:$pt, node:$mask, node:$vl)]>;
-def any_riscv_fsetccs_vl : PatFrags<(ops node:$lhs, node:$rhs, node:$cc, node:$pt, node:$mask, node:$vl),
- [(riscv_setcc_vl node:$lhs, node:$rhs, node:$cc, node:$pt, node:$mask, node:$vl),
- (riscv_strict_fsetccs_vl node:$lhs, node:$rhs, node:$cc, node:$pt, node:$mask, node:$vl)]>;
+def any_riscv_fsetcc_vl : PatFrags<(ops node:$lhs, node:$rhs, node:$cc, node:$passthru, node:$mask, node:$vl),
+ [(riscv_setcc_vl node:$lhs, node:$rhs, node:$cc, node:$passthru, node:$mask, node:$vl),
+ (riscv_strict_fsetcc_vl node:$lhs, node:$rhs, node:$cc, node:$passthru, node:$mask, node:$vl)]>;
+def any_riscv_fsetccs_vl : PatFrags<(ops node:$lhs, node:$rhs, node:$cc, node:$passthru, node:$mask, node:$vl),
+ [(riscv_setcc_vl node:$lhs, node:$rhs, node:$cc, node:$passthru, node:$mask, node:$vl),
+ (riscv_strict_fsetccs_vl node:$lhs, node:$rhs, node:$cc, node:$passthru, node:$mask, node:$vl)]>;
def riscv_vrgather_vx_vl : SDNode<"RISCVISD::VRGATHER_VX_VL",
SDTypeProfile<1, 5, [SDTCisVec<0>,
@@ -640,14 +640,14 @@ class VPatBinaryVL_V<SDPatternOperator vop,
: Pat<(result_type (vop
(op1_type op1_reg_class:$rs1),
(op2_type op2_reg_class:$rs2),
- (result_type result_reg_class:$pt),
+ (result_type result_reg_class:$passthru),
(mask_type V0),
VLOpFrag)),
(!cast<Instruction>(
!if(isSEWAware,
instruction_name#"_"#suffix#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK",
instruction_name#"_"#suffix#"_"#vlmul.MX#"_MASK"))
- result_reg_class:$pt,
+ result_reg_class:$passthru,
op1_reg_class:$rs1,
op2_reg_class:$rs2,
(mask_type V0), GPR:$vl, log2sew, TAIL_AGNOSTIC)>;
@@ -668,14 +668,14 @@ class VPatBinaryVL_V_RM<SDPatternOperator vop,
: Pat<(result_type (vop
(op1_type op1_reg_class:$rs1),
(op2_type op2_reg_class:$rs2),
- (result_type result_reg_class:$pt),
+ (result_type result_reg_class:$passthru),
(mask_type V0),
VLOpFrag)),
(!cast<Instruction>(
!if(isSEWAware,
instruction_name#"_"#suffix#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK",
instruction_name#"_"#suffix#"_"#vlmul.MX#"_MASK"))
- result_reg_class:$pt,
+ result_reg_class:$passthru,
op1_reg_class:$rs1,
op2_reg_class:$rs2,
(mask_type V0),
@@ -800,14 +800,14 @@ class VPatBinaryVL_XI<SDPatternOperator vop,
: Pat<(result_type (vop
(vop1_type vop_reg_class:$rs1),
(vop2_type (SplatPatKind (XLenVT xop_kind:$rs2))),
- (result_type result_reg_class:$pt),
+ (result_type result_reg_class:$passthru),
(mask_type V0),
VLOpFrag)),
(!cast<Instruction>(
!if(isSEWAware,
instruction_name#_#suffix#_#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK",
instruction_name#_#suffix#_#vlmul.MX#"_MASK"))
- result_reg_class:$pt,
+ result_reg_class:$passthru,
vop_reg_class:$rs1,
xop_kind:$rs2,
(mask_type V0), GPR:$vl, log2sew, TAIL_AGNOSTIC)>;
@@ -924,14 +924,14 @@ class VPatBinaryVL_VF<SDPatternOperator vop,
bit isSEWAware = 0>
: Pat<(result_type (vop (vop1_type vop_reg_class:$rs1),
(vop2_type (SplatFPOp scalar_reg_class:$rs2)),
- (result_type result_reg_class:$pt),
+ (result_type result_reg_class:$passthru),
(mask_type V0),
VLOpFrag)),
(!cast<Instruction>(
!if(isSEWAware,
instruction_name#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK",
instruction_name#"_"#vlmul.MX#"_MASK"))
- result_reg_class:$pt,
+ result_reg_class:$passthru,
vop_reg_class:$rs1,
scalar_reg_class:$rs2,
(mask_type V0), GPR:$vl, log2sew, TAIL_AGNOSTIC)>;
@@ -950,14 +950,14 @@ class VPatBinaryVL_VF_RM<SDPatternOperator vop,
bit isSEWAware = 0>
: Pat<(result_type (vop (vop1_type vop_reg_class:$rs1),
(vop2_type (SplatFPOp scalar_reg_class:$rs2)),
- (result_type result_reg_class:$pt),
+ (result_type result_reg_class:$passthru),
(mask_type V0),
VLOpFrag)),
(!cast<Instruction>(
!if(isSEWAware,
instruction_name#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK",
instruction_name#"_"#vlmul.MX#"_MASK"))
- result_reg_class:$pt,
+ result_reg_class:$passthru,
vop_reg_class:$rs1,
scalar_reg_class:$rs2,
(mask_type V0),
@@ -1004,14 +1004,14 @@ multiclass VPatBinaryFPVL_R_VF<SDPatternOperator vop, string instruction_name,
let Predicates = GetVTypePredicates<fvti>.Predicates in
def : Pat<(fvti.Vector (vop (SplatFPOp fvti.ScalarRegClass:$rs2),
fvti.RegClass:$rs1,
- (fvti.Vector fvti.RegClass:$pt),
+ (fvti.Vector fvti.RegClass:$passthru),
(fvti.Mask V0),
VLOpFrag)),
(!cast<Instruction>(
!if(isSEWAware,
instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_E"#fvti.SEW#"_MASK",
instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_MASK"))
- fvti.RegClass:$pt,
+ fvti.RegClass:$passthru,
fvti.RegClass:$rs1, fvti.ScalarRegClass:$rs2,
(fvti.Mask V0), GPR:$vl, fvti.Log2SEW, TAIL_AGNOSTIC)>;
}
@@ -1023,14 +1023,14 @@ multiclass VPatBinaryFPVL_R_VF_RM<SDPatternOperator vop, string instruction_name
let Predicates = GetVTypePredicates<fvti>.Predicates in
def : Pat<(fvti.Vector (vop (SplatFPOp fvti.ScalarRegClass:$rs2),
fvti.RegClass:$rs1,
- (fvti.Vector fvti.RegClass:$pt),
+ (fvti.Vector fvti.RegClass:$passthru),
(fvti.Mask V0),
VLOpFrag)),
(!cast<Instruction>(
!if(isSEWAware,
instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_E"#fvti.SEW#"_MASK",
instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_MASK"))
- fvti.RegClass:$pt,
+ fvti.RegClass:$passthru,
fvti.RegClass:$rs1, fvti.ScalarRegClass:$rs2,
(fvti.Mask V0),
// Value to indicate no rounding mode change in
@@ -1044,11 +1044,11 @@ multiclass VPatIntegerSetCCVL_VV<VTypeInfo vti, string instruction_name,
CondCode cc> {
def : Pat<(vti.Mask (riscv_setcc_vl (vti.Vector vti.RegClass:$rs1),
vti.RegClass:$rs2, cc,
- VR:$pt,
+ VR:$passthru,
(vti.Mask V0),
VLOpFrag)),
(!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX#"_MASK")
- VR:$pt,
+ VR:$passthru,
vti.RegClass:$rs1,
vti.RegClass:$rs2,
(vti.Mask V0), GPR:$vl, vti.Log2SEW)>;
@@ -1060,11 +1060,11 @@ multiclass VPatIntegerSetCCVL_VV_Swappable<VTypeInfo vti, string instruction_nam
: VPatIntegerSetCCVL_VV<vti, instruction_name, cc> {
def : Pat<(vti.Mask (riscv_setcc_vl (vti.Vector vti.RegClass:$rs2),
vti.RegClass:$rs1, invcc,
- VR:$pt,
+ VR:$passthru,
(vti.Mask V0),
VLOpFrag)),
(!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX#"_MASK")
- VR:$pt, vti.RegClass:$rs1,
+ VR:$passthru, vti.RegClass:$rs1,
vti.RegClass:$rs2, (vti.Mask V0), GPR:$vl, vti.Log2SEW)>;
}
@@ -1073,17 +1073,17 @@ multiclass VPatIntegerSetCCVL_VX_Swappable<VTypeInfo vti, string instruction_nam
defvar instruction_masked = !cast<Instruction>(instruction_name#"_VX_"#vti.LMul.MX#"_MASK");
def : Pat<(vti.Mask (riscv_setcc_vl (vti.Vector vti.RegClass:$rs1),
(SplatPat (XLenVT GPR:$rs2)), cc,
- VR:$pt,
+ VR:$passthru,
(vti.Mask V0),
VLOpFrag)),
- (instruction_masked VR:$pt, vti.RegClass:$rs1,
+ (instruction_masked VR:$passthru, vti.RegClass:$rs1,
GPR:$rs2, (vti.Mask V0), GPR:$vl, vti.Log2SEW)>;
def : Pat<(vti.Mask (riscv_setcc_vl (SplatPat (XLenVT GPR:$rs2)),
(vti.Vector vti.RegClass:$rs1), invcc,
- VR:$pt,
+ VR:$passthru,
(vti.Mask V0),
VLOpFrag)),
- (instruction_masked VR:$pt, vti.RegClass:$rs1,
+ (instruction_masked VR:$passthru, vti.RegClass:$rs1,
GPR:$rs2, (vti.Mask V0), GPR:$vl, vti.Log2SEW)>;
}
@@ -1092,20 +1092,20 @@ multiclass VPatIntegerSetCCVL_VI_Swappable<VTypeInfo vti, string instruction_nam
defvar instruction_masked = !cast<Instruction>(instruction_name#"_VI_"#vti.LMul.MX#"_MASK");
def : Pat<(vti.Mask (riscv_setcc_vl (vti.Vector vti.RegClass:$rs1),
(SplatPat_simm5 simm5:$rs2), cc,
- VR:$pt,
+ VR:$passthru,
(vti.Mask V0),
VLOpFrag)),
- (instruction_masked VR:$pt, vti.RegClass:$rs1,
+ (instruction_masked VR:$passthru, vti.RegClass:$rs1,
XLenVT:$rs2, (vti.Mask V0), GPR:$vl,
vti.Log2SEW)>;
// FIXME: Can do some canonicalization to remove these patterns.
def : Pat<(vti.Mask (riscv_setcc_vl (SplatPat_simm5 simm5:$rs2),
(vti.Vector vti.RegClass:$rs1), invcc,
- VR:$pt,
+ VR:$passthru,
(vti.Mask V0),
VLOpFrag)),
- (instruction_masked VR:$pt, vti.RegClass:$rs1,
+ (instruction_masked VR:$passthru, vti.RegClass:$rs1,
simm5:$rs2, (vti.Mask V0), GPR:$vl,
vti.Log2SEW)>;
}
@@ -1117,20 +1117,20 @@ multiclass VPatIntegerSetCCVL_VIPlus1_Swappable<VTypeInfo vti,
defvar instruction_masked = !cast<Instruction>(instruction_name#"_VI_"#vti.LMul.MX#"_MASK");
def : Pat<(vti.Mask (riscv_setcc_vl (vti.Vector vti.RegClass:$rs1),
(splatpat_kind simm5:$rs2), cc,
- VR:$pt,
+ VR:$passthru,
(vti.Mask V0),
VLOpFrag)),
- (instruction_masked VR:$pt, vti.RegClass:$rs1,
+ (instruction_masked VR:$passthru, vti.RegClass:$rs1,
(DecImm simm5:$rs2), (vti.Mask V0), GPR:$vl,
vti.Log2SEW)>;
// FIXME: Can do some canonicalization to remove these patterns.
def : Pat<(vti.Mask (riscv_setcc_vl (splatpat_kind simm5:$rs2),
(vti.Vector vti.RegClass:$rs1), invcc,
- VR:$pt,
+ VR:$passthru,
(vti.Mask V0),
VLOpFrag)),
- (instruction_masked VR:$pt, vti.RegClass:$rs1,
+ (instruction_masked VR:$passthru, vti.RegClass:$rs1,
(DecImm simm5:$rs2), (vti.Mask V0), GPR:$vl,
vti.Log2SEW)>;
}
@@ -1143,31 +1143,31 @@ multiclass VPatFPSetCCVL_VV_VF_FV<SDPatternOperator vop, CondCode cc,
def : Pat<(fvti.Mask (vop (fvti.Vector fvti.RegClass:$rs1),
fvti.RegClass:$rs2,
cc,
- VR:$pt,
+ VR:$passthru,
(fvti.Mask V0),
VLOpFrag)),
(!cast<Instruction>(inst_name#"_VV_"#fvti.LMul.MX#"_MASK")
- VR:$pt, fvti.RegClass:$rs1,
+ VR:$passthru, fvti.RegClass:$rs1,
fvti.RegClass:$rs2, (fvti.Mask V0),
GPR:$vl, fvti.Log2SEW)>;
def : Pat<(fvti.Mask (vop (fvti.Vector fvti.RegClass:$rs1),
(SplatFPOp fvti.ScalarRegClass:$rs2),
cc,
- VR:$pt,
+ VR:$passthru,
(fvti.Mask V0),
VLOpFrag)),
(!cast<Instruction>(inst_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_MASK")
- VR:$pt, fvti.RegClass:$rs1,
+ VR:$passthru, fvti.RegClass:$rs1,
fvti.ScalarRegClass:$rs2, (fvti.Mask V0),
GPR:$vl, fvti.Log2SEW)>;
def : Pat<(fvti.Mask (vop (SplatFPOp fvti.ScalarRegClass:$rs2),
(fvti.Vector fvti.RegClass:$rs1),
cc,
- VR:$pt,
+ VR:$passthru,
(fvti.Mask V0),
VLOpFrag)),
(!cast<Instruction>(swapped_op_inst_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_MASK")
- VR:$pt, fvti.RegClass:$rs1,
+ VR:$passthru, fvti.RegClass:$rs1,
fvti.ScalarRegClass:$rs2, (fvti.Mask V0),
GPR:$vl, fvti.Log2SEW)>;
}
@@ -1437,12 +1437,12 @@ multiclass VPatReductionVL<SDNode vop, string instruction_name, bit is_float> {
foreach vti = !if(is_float, AllFloatVectors, AllIntegerVectors) in {
defvar vti_m1 = !cast<VTypeInfo>(!if(is_float, "VF", "VI") # vti.SEW # "M1");
let Predicates = GetVTypePredicates<vti>.Predicates in {
- def: Pat<(vti_m1.Vector (vop (vti_m1.Vector VR:$pt),
+ def: Pat<(vti_m1.Vector (vop (vti_m1.Vector VR:$passthru),
(vti.Vector vti.RegClass:$rs1), VR:$rs2,
(vti.Mask V0), VLOpFrag,
(XLenVT timm:$policy))),
(!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX#"_E"#vti.SEW#"_MASK")
- (vti_m1.Vector VR:$pt),
+ (vti_m1.Vector VR:$passthru),
(vti.Vector vti.RegClass:$rs1),
(vti_m1.Vector VR:$rs2),
(vti.Mask V0), GPR:$vl, vti.Log2SEW, (XLenVT timm:$policy))>;
@@ -1454,12 +1454,12 @@ multiclass VPatReductionVL_RM<SDNode vop, string instruction_name, bit is_float>
foreach vti = !if(is_float, AllFloatVectors, AllIntegerVectors) in {
defvar vti_m1 = !cast<VTypeInfo>(!if(is_float, "VF", "VI") # vti.SEW # "M1");
let Predicates = GetVTypePredicates<vti>.Predicates in {
- def: Pat<(vti_m1.Vector (vop (vti_m1.Vector VR:$pt),
+ def: Pat<(vti_m1.Vector (vop (vti_m1.Vector VR:$passthru),
(vti.Vector vti.RegClass:$rs1), VR:$rs2,
(vti.Mask V0), VLOpFrag,
(XLenVT timm:$policy))),
(!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX#"_E"#vti.SEW#"_MASK")
- (vti_m1.Vector VR:$pt),
+ (vti_m1.Vector VR:$passthru),
(vti.Vector vti.RegClass:$rs1),
(vti_m1.Vector VR:$rs2),
(vti.Mask V0),
@@ -1519,12 +1519,12 @@ multiclass VPatWidenReductionVL<SDNode vop, PatFrags extop, string instruction_n
defvar wti_m1 = !cast<VTypeInfo>(!if(is_float, "VF", "VI") # wti.SEW # "M1");
let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
GetVTypePredicates<wti>.Predicates) in {
- def: Pat<(wti_m1.Vector (vop (wti_m1.Vector VR:$pt),
+ def: Pat<(wti_m1.Vector (vop (wti_m1.Vector VR:$passthru),
(wti.Vector (extop (vti.Vector vti.RegClass:$rs1))),
VR:$rs2, (vti.Mask V0), VLOpFrag,
(XLenVT timm:$policy))),
(!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX#"_E"#vti.SEW#"_MASK")
- (wti_m1.Vector VR:$pt), (vti.Vector vti.RegClass:$rs1),
+ (wti_m1.Vector VR:$passthru), (vti.Vector vti.RegClass:$rs1),
(wti_m1.Vector VR:$rs2), (vti.Mask V0), GPR:$vl, vti.Log2SEW,
(XLenVT timm:$policy))>;
}
@@ -1538,12 +1538,12 @@ multiclass VPatWidenReductionVL_RM<SDNode vop, PatFrags extop, string instructio
defvar wti_m1 = !cast<VTypeInfo>(!if(is_float, "VF", "VI") # wti.SEW # "M1");
let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
GetVTypePredicates<wti>.Predicates) in {
- def: Pat<(wti_m1.Vector (vop (wti_m1.Vector VR:$pt),
+ def: Pat<(wti_m1.Vector (vop (wti_m1.Vector VR:$passthru),
(wti.Vector (extop (vti.Vector vti.RegClass:$rs1))),
VR:$rs2, (vti.Mask V0), VLOpFrag,
(XLenVT timm:$policy))),
(!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX#"_E"#vti.SEW#"_MASK")
- (wti_m1.Vector VR:$pt), (vti.Vector vti.RegClass:$rs1),
+ (wti_m1.Vector VR:$passthru), (vti.Vector vti.RegClass:$rs1),
(wti_m1.Vector VR:$rs2), (vti.Mask V0),
// Value to indicate no rounding mode change in
// RISCVInsertReadWriteCSR
@@ -1561,12 +1561,12 @@ multiclass VPatWidenReductionVL_Ext_VL<SDNode vop, PatFrags extop, string instru
defvar wti_m1 = !cast<VTypeInfo>(!if(is_float, "VF", "VI") # wti.SEW # "M1");
let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
GetVTypePredicates<wti>.Predicates) in {
- def: Pat<(wti_m1.Vector (vop (wti_m1.Vector VR:$pt),
+ def: Pat<(wti_m1.Vector (vop (wti_m1.Vector VR:$passthru),
(wti.Vector (extop (vti.Vector vti.RegClass:$rs1), (vti.Mask true_mask), VLOpFrag)),
VR:$rs2, (vti.Mask V0), VLOpFrag,
(XLenVT timm:$policy))),
(!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX#"_E"#vti.SEW#"_MASK")
- (wti_m1.Vector VR:$pt), (vti.Vector vti.RegClass:$rs1),
+ (wti_m1.Vector VR:$passthru), (vti.Vector vti.RegClass:$rs1),
(wti_m1.Vector VR:$rs2), (vti.Mask V0), GPR:$vl, vti.Log2SEW,
(XLenVT timm:$policy))>;
}
@@ -1580,12 +1580,12 @@ multiclass VPatWidenReductionVL_Ext_VL_RM<SDNode vop, PatFrags extop, string ins
defvar wti_m1 = !cast<VTypeInfo>(!if(is_float, "VF", "VI") # wti.SEW # "M1");
let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
GetVTypePredicates<wti>.Predicates) in {
- def: Pat<(wti_m1.Vector (vop (wti_m1.Vector VR:$pt),
+ def: Pat<(wti_m1.Vector (vop (wti_m1.Vector VR:$passthru),
(wti.Vector (extop (vti.Vector vti.RegClass:$rs1), (vti.Mask true_mask), VLOpFrag)),
VR:$rs2, (vti.Mask V0), VLOpFrag,
(XLenVT timm:$policy))),
(!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX#"_E"#vti.SEW#"_MASK")
- (wti_m1.Vector VR:$pt), (vti.Vector vti.RegClass:$rs1),
+ (wti_m1.Vector VR:$passthru), (vti.Vector vti.RegClass:$rs1),
(wti_m1.Vector VR:$rs2), (vti.Mask V0),
// Value to indicate no rounding mode change in
// RISCVInsertReadWriteCSR
@@ -2098,15 +2098,15 @@ multiclass VPatAVGADDVL_VV_VX_RM<SDNode vop, int vxrm, string suffix = ""> {
let Predicates = GetVTypePredicates<vti>.Predicates in {
def : Pat<(vop (vti.Vector vti.RegClass:$rs1),
(vti.Vector vti.RegClass:$rs2),
- vti.RegClass:$pt, (vti.Mask V0), VLOpFrag),
+ vti.RegClass:$passthru, (vti.Mask V0), VLOpFrag),
(!cast<Instruction>("PseudoVAADD"#suffix#"_VV_"#vti.LMul.MX#"_MASK")
- vti.RegClass:$pt, vti.RegClass:$rs1, vti.RegClass:$rs2,
+ vti.RegClass:$passthru, vti.RegClass:$rs1, vti.RegClass:$rs2,
(vti.Mask V0), vxrm, GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
def : Pat<(vop (vti.Vector vti.RegClass:$rs1),
(vti.Vector (SplatPat (XLenVT GPR:$rs2))),
- vti.RegClass:$pt, (vti.Mask V0), VLOpFrag),
+ vti.RegClass:$passthru, (vti.Mask V0), VLOpFrag),
(!cast<Instruction>("PseudoVAADD"#suffix#"_VX_"#vti.LMul.MX#"_MASK")
- vti.RegClass:$pt, vti.RegClass:$rs1, GPR:$rs2,
+ vti.RegClass:$passthru, vti.RegClass:$rs1, GPR:$rs2,
(vti.Mask V0), vxrm, GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
}
}
@@ -2127,15 +2127,15 @@ foreach vti = AllIntegerVectors in {
let Predicates = GetVTypePredicates<vti>.Predicates in {
def : Pat<(riscv_sub_vl (vti.Vector (SplatPat (XLenVT GPR:$rs2))),
(vti.Vector vti.RegClass:$rs1),
- vti.RegClass:$pt, (vti.Mask V0), VLOpFrag),
+ vti.RegClass:$passthru, (vti.Mask V0), VLOpFrag),
(!cast<Instruction>("PseudoVRSUB_VX_"# vti.LMul.MX#"_MASK")
- vti.RegClass:$pt, vti.RegClass:$rs1, GPR:$rs2,
+ vti.RegClass:$passthru, vti.RegClass:$rs1, GPR:$rs2,
(vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
def : Pat<(riscv_sub_vl (vti.Vector (SplatPat_simm5 simm5:$rs2)),
(vti.Vector vti.RegClass:$rs1),
- vti.RegClass:$pt, (vti.Mask V0), VLOpFrag),
+ vti.RegClass:$passthru, (vti.Mask V0), VLOpFrag),
(!cast<Instruction>("PseudoVRSUB_VI_"# vti.LMul.MX#"_MASK")
- vti.RegClass:$pt, vti.RegClass:$rs1, simm5:$rs2,
+ vti.RegClass:$passthru, vti.RegClass:$rs1, simm5:$rs2,
(vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
}
}
@@ -2157,18 +2157,18 @@ foreach vtiToWti = AllWidenableIntVectors in {
(vti.Mask V0), VLOpFrag)),
(wti.Vector (riscv_vmv_v_x_vl
(wti.Vector undef), 1, VLOpFrag)),
- wti.RegClass:$pt, (vti.Mask V0), VLOpFrag),
+ wti.RegClass:$passthru, (vti.Mask V0), VLOpFrag),
(!cast<Instruction>("PseudoVWADD_VV_"#vti.LMul.MX#"_MASK")
- wti.RegClass:$pt, vti.RegClass:$rs1, vti.RegClass:$rs1,
+ wti.RegClass:$passthru, vti.RegClass:$rs1, vti.RegClass:$rs1,
(vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
def : Pat<(riscv_shl_vl (wti.Vector (riscv_zext_vl_oneuse
(vti.Vector vti.RegClass:$rs1),
(vti.Mask V0), VLOpFrag)),
(wti.Vector (riscv_vmv_v_x_vl
(wti.Vector undef), 1, VLOpFrag)),
- wti.RegClass:$pt, (vti.Mask V0), VLOpFrag),
+ wti.RegClass:$passthru, (vti.Mask V0), VLOpFrag),
(!cast<Instruction>("PseudoVWADDU_VV_"#vti.LMul.MX#"_MASK")
- wti.RegClass:$pt, vti.RegClass:$rs1, vti.RegClass:$rs1,
+ wti.RegClass:$passthru, vti.RegClass:$rs1, vti.RegClass:$rs1,
(vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
}
}
@@ -2333,28 +2333,28 @@ foreach vti = AllIntegerVectors in {
def : Pat<(vti.Vector (riscv_vmerge_vl (vti.Mask V0),
vti.RegClass:$rs1,
vti.RegClass:$rs2,
- vti.RegClass:$pt,
+ vti.RegClass:$passthru,
VLOpFrag)),
(!cast<Instruction>("PseudoVMERGE_VVM_"#vti.LMul.MX)
- vti.RegClass:$pt, vti.RegClass:$rs2, vti.RegClass:$rs1,
+ vti.RegClass:$passthru, vti.RegClass:$rs2, vti.RegClass:$rs1,
(vti.Mask V0), GPR:$vl, vti.Log2SEW)>;
def : Pat<(vti.Vector (riscv_vmerge_vl (vti.Mask V0),
(SplatPat XLenVT:$rs1),
vti.RegClass:$rs2,
- vti.RegClass:$pt,
+ vti.RegClass:$passthru,
VLOpFrag)),
(!cast<Instruction>("PseudoVMERGE_VXM_"#vti.LMul.MX)
- vti.RegClass:$pt, vti.RegClass:$rs2, GPR:$rs1,
+ vti.RegClass:$passthru, vti.RegClass:$rs2, GPR:$rs1,
(vti.Mask V0), GPR:$vl, vti.Log2SEW)>;
def : Pat<(vti.Vector (riscv_vmerge_vl (vti.Mask V0),
(SplatPat_simm5 simm5:$rs1),
vti.RegClass:$rs2,
- vti.RegClass:$pt,
+ vti.RegClass:$passthru,
VLOpFrag)),
(!cast<Instruction>("PseudoVMERGE_VIM_"#vti.LMul.MX)
- vti.RegClass:$pt, vti.RegClass:$rs2, simm5:$rs1,
+ vti.RegClass:$passthru, vti.RegClass:$rs2, simm5:$rs1,
(vti.Mask V0), GPR:$vl, vti.Log2SEW)>;
}
}
@@ -2505,11 +2505,11 @@ foreach vti = AllFloatVectors in {
def : Pat<(riscv_fcopysign_vl (vti.Vector vti.RegClass:$rs1),
(vti.Vector vti.RegClass:$rs2),
- vti.RegClass:$pt,
+ vti.RegClass:$passthru,
(vti.Mask V0),
VLOpFrag),
(!cast<Instruction>("PseudoVFSGNJ_VV_"# vti.LMul.MX#"_E"#vti.SEW#"_MASK")
- vti.RegClass:$pt, vti.RegClass:$rs1,
+ vti.RegClass:$passthru, vti.RegClass:$rs1,
vti.RegClass:$rs2, (vti.Mask V0), GPR:$vl, vti.Log2SEW,
TAIL_AGNOSTIC)>;
@@ -2526,11 +2526,11 @@ foreach vti = AllFloatVectors in {
def : Pat<(riscv_fcopysign_vl (vti.Vector vti.RegClass:$rs1),
(SplatFPOp vti.ScalarRegClass:$rs2),
- vti.RegClass:$pt,
+ vti.RegClass:$passthru,
(vti.Mask V0),
VLOpFrag),
(!cast<Instruction>("PseudoVFSGNJ_V"#vti.ScalarSuffix#"_"# vti.LMul.MX#"_E"#vti.SEW#"_MASK")
- vti.RegClass:$pt, vti.RegClass:$rs1,
+ vti.RegClass:$passthru, vti.RegClass:$rs1,
vti.ScalarRegClass:$rs2, (vti.Mask V0), GPR:$vl, vti.Log2SEW,
TAIL_AGNOSTIC)>;
@@ -2559,29 +2559,29 @@ foreach fvti = !listconcat(AllFloatVectors, AllBFloatVectors) in {
def : Pat<(fvti.Vector (riscv_vmerge_vl (fvti.Mask V0),
fvti.RegClass:$rs1,
fvti.RegClass:$rs2,
- fvti.RegClass:$pt,
+ fvti.RegClass:$passthru,
VLOpFrag)),
(!cast<Instruction>("PseudoVMERGE_VVM_"#fvti.LMul.MX)
- fvti.RegClass:$pt, fvti.RegClass:$rs2, fvti.RegClass:$rs1, (fvti.Mask V0),
+ fvti.RegClass:$passthru, fvti.RegClass:$rs2, fvti.RegClass:$rs1, (fvti.Mask V0),
GPR:$vl, fvti.Log2SEW)>;
def : Pat<(fvti.Vector (riscv_vmerge_vl (fvti.Mask V0),
(SplatFPOp (SelectFPImm (XLenVT GPR:$imm))),
fvti.RegClass:$rs2,
- fvti.RegClass:$pt,
+ fvti.RegClass:$passthru,
VLOpFrag)),
(!cast<Instruction>("PseudoVMERGE_VXM_"#fvti.LMul.MX)
- fvti.RegClass:$pt, fvti.RegClass:$rs2, GPR:$imm, (fvti.Mask V0),
+ fvti.RegClass:$passthru, fvti.RegClass:$rs2, GPR:$imm, (fvti.Mask V0),
GPR:$vl, fvti.Log2SEW)>;
def : Pat<(fvti.Vector (riscv_vmerge_vl (fvti.Mask V0),
(SplatFPOp (fvti.Scalar fpimm0)),
fvti.RegClass:$rs2,
- fvti.RegClass:$pt,
+ fvti.RegClass:$passthru,
VLOpFrag)),
(!cast<Instruction>("PseudoVMERGE_VIM_"#fvti.LMul.MX)
- fvti.RegClass:$pt, fvti.RegClass:$rs2, 0, (fvti.Mask V0),
+ fvti.RegClass:$passthru, fvti.RegClass:$rs2, 0, (fvti.Mask V0),
GPR:$vl, fvti.Log2SEW)>;
}
}
@@ -2591,10 +2591,10 @@ foreach fvti = AllFloatVectors in {
def : Pat<(fvti.Vector (riscv_vmerge_vl (fvti.Mask V0),
(SplatFPOp fvti.ScalarRegClass:$rs1),
fvti.RegClass:$rs2,
- fvti.RegClass:$pt,
+ fvti.RegClass:$passthru,
VLOpFrag)),
(!cast<Instruction>("PseudoVFMERGE_V"#fvti.ScalarSuffix#"M_"#fvti.LMul.MX)
- fvti.RegClass:$pt, fvti.RegClass:$rs2,
+ fvti.RegClass:$passthru, fvti.RegClass:$rs2,
(fvti.Scalar fvti.ScalarRegClass:$rs1),
(fvti.Mask V0), GPR:$vl, fvti.Log2SEW)>;
}
@@ -2866,10 +2866,10 @@ foreach mti = AllMasks in {
// 16.1. Integer Scalar Move Instructions
foreach vti = NoGroupIntegerVectors in {
let Predicates = GetVTypePredicates<vti>.Predicates in {
- def : Pat<(vti.Vector (riscv_vmv_s_x_vl (vti.Vector vti.RegClass:$pt),
+ def : Pat<(vti.Vector (riscv_vmv_s_x_vl (vti.Vector vti.RegClass:$passthru),
vti.ScalarRegClass:$rs1,
VLOpFrag)),
- (PseudoVMV_S_X $pt, vti.ScalarRegClass:$rs1, GPR:$vl,
+ (PseudoVMV_S_X $passthru, vti.ScalarRegClass:$rs1, GPR:$vl,
vti.Log2SEW)>;
}
}
@@ -2879,26 +2879,26 @@ foreach vti = AllIntegerVectors in {
let Predicates = GetVTypePredicates<vti>.Predicates in {
def : Pat<(vti.Vector (riscv_vrgather_vv_vl vti.RegClass:$rs2,
vti.RegClass:$rs1,
- vti.RegClass:$pt,
+ vti.RegClass:$passthru,
(vti.Mask V0),
VLOpFrag)),
(!cast<Instruction>("PseudoVRGATHER_VV_"# vti.LMul.MX#"_E"# vti.SEW#"_MASK")
- vti.RegClass:$pt, vti.RegClass:$rs2, vti.RegClass:$rs1,
+ vti.RegClass:$passthru, vti.RegClass:$rs2, vti.RegClass:$rs1,
(vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
def : Pat<(vti.Vector (riscv_vrgather_vx_vl vti.RegClass:$rs2, GPR:$rs1,
- vti.RegClass:$pt,
+ vti.RegClass:$passthru,
(vti.Mask V0),
VLOpFrag)),
(!cast<Instruction>("PseudoVRGATHER_VX_"# vti.LMul.MX#"_MASK")
- vti.RegClass:$pt, vti.RegClass:$rs2, GPR:$rs1,
+ vti.RegClass:$passthru, vti.RegClass:$rs2, GPR:$rs1,
(vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
def : Pat<(vti.Vector (riscv_vrgather_vx_vl vti.RegClass:$rs2,
uimm5:$imm,
- vti.RegClass:$pt,
+ vti.RegClass:$passthru,
(vti.Mask V0),
VLOpFrag)),
(!cast<Instruction>("PseudoVRGATHER_VI_"# vti.LMul.MX#"_MASK")
- vti.RegClass:$pt, vti.RegClass:$rs2, uimm5:$imm,
+ vti.RegClass:$passthru, vti.RegClass:$rs2, uimm5:$imm,
(vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
}
@@ -2914,11 +2914,11 @@ foreach vti = AllIntegerVectors in {
def : Pat<(vti.Vector
(riscv_vrgatherei16_vv_vl vti.RegClass:$rs2,
(ivti.Vector ivti.RegClass:$rs1),
- vti.RegClass:$pt,
+ vti.RegClass:$passthru,
(vti.Mask V0),
VLOpFrag)),
(!cast<Instruction>(inst#"_MASK")
- vti.RegClass:$pt, vti.RegClass:$rs2, ivti.RegClass:$rs1,
+ vti.RegClass:$passthru, vti.RegClass:$rs2, ivti.RegClass:$rs1,
(vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
}
}
@@ -2926,24 +2926,24 @@ foreach vti = AllIntegerVectors in {
// 16.2. Floating-Point Scalar Move Instructions
foreach vti = NoGroupFloatVectors in {
let Predicates = GetVTypePredicates<vti>.Predicates in {
- def : Pat<(vti.Vector (riscv_vfmv_s_f_vl (vti.Vector vti.RegClass:$pt),
+ def : Pat<(vti.Vector (riscv_vfmv_s_f_vl (vti.Vector vti.RegClass:$passthru),
(vti.Scalar (fpimm0)),
VLOpFrag)),
- (PseudoVMV_S_X $pt, (XLenVT X0), GPR:$vl, vti.Log2SEW)>;
- def : Pat<(vti.Vector (riscv_vfmv_s_f_vl (vti.Vector vti.RegClass:$pt),
+ (PseudoVMV_S_X $passthru, (XLenVT X0), GPR:$vl, vti.Log2SEW)>;
+ def : Pat<(vti.Vector (riscv_vfmv_s_f_vl (vti.Vector vti.RegClass:$passthru),
(vti.Scalar (SelectFPImm (XLenVT GPR:$imm))),
VLOpFrag)),
- (PseudoVMV_S_X $pt, GPR:$imm, GPR:$vl, vti.Log2SEW)>;
+ (PseudoVMV_S_X $passthru, GPR:$imm, GPR:$vl, vti.Log2SEW)>;
}
}
foreach vti = AllFloatVectors in {
let Predicates = GetVTypePredicates<vti>.Predicates in {
- def : Pat<(vti.Vector (riscv_vfmv_s_f_vl (vti.Vector vti.RegClass:$pt),
+ def : Pat<(vti.Vector (riscv_vfmv_s_f_vl (vti.Vector vti.RegClass:$passthru),
vti.ScalarRegClass:$rs1,
VLOpFrag)),
(!cast<Instruction>("PseudoVFMV_S_"#vti.ScalarSuffix#"_"#vti.LMul.MX)
- vti.RegClass:$pt,
+ vti.RegClass:$passthru,
(vti.Scalar vti.ScalarRegClass:$rs1), GPR:$vl, vti.Log2SEW)>;
}
defvar ivti = GetIntVTypeInfo<vti>.Vti;
@@ -2951,27 +2951,27 @@ foreach vti = AllFloatVectors in {
def : Pat<(vti.Vector
(riscv_vrgather_vv_vl vti.RegClass:$rs2,
(ivti.Vector vti.RegClass:$rs1),
- vti.RegClass:$pt,
+ vti.RegClass:$passthru,
(vti.Mask V0),
VLOpFrag)),
(!cast<Instruction>("PseudoVRGATHER_VV_"# vti.LMul.MX#"_E"# vti.SEW#"_MASK")
- vti.RegClass:$pt, vti.RegClass:$rs2, vti.RegClass:$rs1,
+ vti.RegClass:$passthru, vti.RegClass:$rs2, vti.RegClass:$rs1,
(vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
def : Pat<(vti.Vector (riscv_vrgather_vx_vl vti.RegClass:$rs2, GPR:$rs1,
- vti.RegClass:$pt,
+ vti.RegClass:$passthru,
(vti.Mask V0),
VLOpFrag)),
(!cast<Instruction>("PseudoVRGATHER_VX_"# vti.LMul.MX#"_MASK")
- vti.RegClass:$pt, vti.RegClass:$rs2, GPR:$rs1,
+ vti.RegClass:$passthru, vti.RegClass:$rs2, GPR:$rs1,
(vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
def : Pat<(vti.Vector
(riscv_vrgather_vx_vl vti.RegClass:$rs2,
uimm5:$imm,
- vti.RegClass:$pt,
+ vti.RegClass:$passthru,
(vti.Mask V0),
VLOpFrag)),
(!cast<Instruction>("PseudoVRGATHER_VI_"# vti.LMul.MX#"_MASK")
- vti.RegClass:$pt, vti.RegClass:$rs2, uimm5:$imm,
+ vti.RegClass:$passthru, vti.RegClass:$rs2, uimm5:$imm,
(vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
}
@@ -2987,11 +2987,11 @@ foreach vti = AllFloatVectors in {
def : Pat<(vti.Vector
(riscv_vrgatherei16_vv_vl vti.RegClass:$rs2,
(ivti.Vector ivti.RegClass:$rs1),
- vti.RegClass:$pt,
+ vti.RegClass:$passthru,
(vti.Mask V0),
VLOpFrag)),
(!cast<Instruction>(inst#"_MASK")
- vti.RegClass:$pt, vti.RegClass:$rs2, ivti.RegClass:$rs1,
+ vti.RegClass:$passthru, vti.RegClass:$rs2, ivti.RegClass:$rs1,
(vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
}
}
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td
index d8a3ed3acd4ef..cd03ac257e321 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td
@@ -691,11 +691,11 @@ multiclass VPatUnaryVL_V<SDPatternOperator op, string instruction_name,
let Predicates = !listconcat([predicate],
GetVTypePredicates<vti>.Predicates) in {
def : Pat<(vti.Vector (op (vti.Vector vti.RegClass:$rs1),
- (vti.Vector vti.RegClass:$pt),
+ (vti.Vector vti.RegClass:$passthru),
(vti.Mask V0),
VLOpFrag)),
(!cast<Instruction>(instruction_name#"_V_"#vti.LMul.MX#"_MASK")
- vti.RegClass:$pt,
+ vti.RegClass:$passthru,
vti.RegClass:$rs1,
(vti.Mask V0),
GPR:$vl,
@@ -711,15 +711,15 @@ foreach vti = AllIntegerVectors in {
def : Pat<(vti.Vector (riscv_and_vl (riscv_xor_vl
(vti.Vector vti.RegClass:$rs1),
(riscv_splat_vector -1),
- (vti.Vector vti.RegClass:$pt),
+ (vti.Vector vti.RegClass:$passthru),
(vti.Mask V0),
VLOpFrag),
(vti.Vector vti.RegClass:$rs2),
- (vti.Vector vti.RegClass:$pt),
+ (vti.Vector vti.RegClass:$passthru),
(vti.Mask V0),
VLOpFrag)),
(!cast<Instruction>("PseudoVANDN_VV_"#vti.LMul.MX#"_MASK")
- vti.RegClass:$pt,
+ vti.RegClass:$passthru,
vti.RegClass:$rs2,
vti.RegClass:$rs1,
(vti.Mask V0),
@@ -730,11 +730,11 @@ foreach vti = AllIntegerVectors in {
def : Pat<(vti.Vector (riscv_and_vl (riscv_splat_vector
(not vti.ScalarRegClass:$rs1)),
(vti.Vector vti.RegClass:$rs2),
- (vti.Vector vti.RegClass:$pt),
+ (vti.Vector vti.RegClass:$passthru),
(vti.Mask V0),
VLOpFrag)),
(!cast<Instruction>("PseudoVANDN_VX_"#vti.LMul.MX#"_MASK")
- vti.RegClass:$pt,
+ vti.RegClass:$passthru,
vti.RegClass:$rs2,
vti.ScalarRegClass:$rs1,
(vti.Mask V0),
@@ -758,10 +758,10 @@ foreach vti = AllIntegerVectors in {
GetVTypePredicates<vti>.Predicates) in {
def : Pat<(riscv_rotl_vl vti.RegClass:$rs2,
(vti.Vector (SplatPat_uimm6 uimm6:$rs1)),
- (vti.Vector vti.RegClass:$pt),
+ (vti.Vector vti.RegClass:$passthru),
(vti.Mask V0), VLOpFrag),
(!cast<Instruction>("PseudoVROR_VI_"#vti.LMul.MX#"_MASK")
- vti.RegClass:$pt,
+ vti.RegClass:$passthru,
vti.RegClass:$rs2,
(!cast<SDNodeXForm>("InvRot" # vti.SEW # "Imm") uimm6:$rs1),
(vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
@@ -778,10 +778,10 @@ foreach vtiToWti = AllWidenableIntVectors in {
def : Pat<(riscv_shl_vl
(wti.Vector (zext_oneuse (vti.Vector vti.RegClass:$rs2))),
(wti.Vector (ext_oneuse (vti.Vector vti.RegClass:$rs1))),
- (wti.Vector wti.RegClass:$pt),
+ (wti.Vector wti.RegClass:$passthru),
(vti.Mask V0), VLOpFrag),
(!cast<Instruction>("PseudoVWSLL_VV_"#vti.LMul.MX#"_MASK")
- wti.RegClass:$pt, vti.RegClass:$rs2, vti.RegClass:$rs1,
+ wti.RegClass:$passthru, vti.RegClass:$rs2, vti.RegClass:$rs1,
(vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
def : Pat<(riscv_shl_vl
@@ -791,19 +791,19 @@ foreach vtiToWti = AllWidenableIntVectors in {
(wti.Vector (riscv_ext_vl_oneuse
(vti.Vector vti.RegClass:$rs1),
(vti.Mask V0), VLOpFrag)),
- (wti.Vector wti.RegClass:$pt),
+ (wti.Vector wti.RegClass:$passthru),
(vti.Mask V0), VLOpFrag),
(!cast<Instruction>("PseudoVWSLL_VV_"#vti.LMul.MX#"_MASK")
- wti.RegClass:$pt, vti.RegClass:$rs2, vti.RegClass:$rs1,
+ wti.RegClass:$passthru, vti.RegClass:$rs2, vti.RegClass:$rs1,
(vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
def : Pat<(riscv_shl_vl
(wti.Vector (zext_oneuse (vti.Vector vti.RegClass:$rs2))),
(wti.Vector (Low8BitsSplatPat (XLenVT GPR:$rs1))),
- (wti.Vector wti.RegClass:$pt),
+ (wti.Vector wti.RegClass:$passthru),
(vti.Mask V0), VLOpFrag),
(!cast<Instruction>("PseudoVWSLL_VX_"#vti.LMul.MX#"_MASK")
- wti.RegClass:$pt, vti.RegClass:$rs2, GPR:$rs1,
+ wti.RegClass:$passthru, vti.RegClass:$rs2, GPR:$rs1,
(vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
def : Pat<(riscv_shl_vl
@@ -811,19 +811,19 @@ foreach vtiToWti = AllWidenableIntVectors in {
(vti.Vector vti.RegClass:$rs2),
(vti.Mask V0), VLOpFrag)),
(wti.Vector (Low8BitsSplatPat (XLenVT GPR:$rs1))),
- (wti.Vector wti.RegClass:$pt),
+ (wti.Vector wti.RegClass:$passthru),
(vti.Mask V0), VLOpFrag),
(!cast<Instruction>("PseudoVWSLL_VX_"#vti.LMul.MX#"_MASK")
- wti.RegClass:$pt, vti.RegClass:$rs2, GPR:$rs1,
+ wti.RegClass:$passthru, vti.RegClass:$rs2, GPR:$rs1,
(vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
def : Pat<(riscv_shl_vl
(wti.Vector (zext_oneuse (vti.Vector vti.RegClass:$rs2))),
(wti.Vector (SplatPat_uimm5 uimm5:$rs1)),
- (wti.Vector wti.RegClass:$pt),
+ (wti.Vector wti.RegClass:$passthru),
(vti.Mask V0), VLOpFrag),
(!cast<Instruction>("PseudoVWSLL_VI_"#vti.LMul.MX#"_MASK")
- wti.RegClass:$pt, vti.RegClass:$rs2, uimm5:$rs1,
+ wti.RegClass:$passthru, vti.RegClass:$rs2, uimm5:$rs1,
(vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
def : Pat<(riscv_shl_vl
@@ -831,37 +831,37 @@ foreach vtiToWti = AllWidenableIntVectors in {
(vti.Vector vti.RegClass:$rs2),
(vti.Mask V0), VLOpFrag)),
(wti.Vector (SplatPat_uimm5 uimm5:$rs1)),
- (wti.Vector wti.RegClass:$pt),
+ (wti.Vector wti.RegClass:$passthru),
(vti.Mask V0), VLOpFrag),
(!cast<Instruction>("PseudoVWSLL_VI_"#vti.LMul.MX#"_MASK")
- wti.RegClass:$pt, vti.RegClass:$rs2, uimm5:$rs1,
+ wti.RegClass:$passthru, vti.RegClass:$rs2, uimm5:$rs1,
(vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
def : Pat<(riscv_vwsll_vl
(vti.Vector vti.RegClass:$rs2),
(vti.Vector vti.RegClass:$rs1),
- (wti.Vector wti.RegClass:$pt),
+ (wti.Vector wti.RegClass:$passthru),
(vti.Mask V0), VLOpFrag),
(!cast<Instruction>("PseudoVWSLL_VV_"#vti.LMul.MX#"_MASK")
- wti.RegClass:$pt, vti.RegClass:$rs2, vti.RegClass:$rs1,
+ wti.RegClass:$passthru, vti.RegClass:$rs2, vti.RegClass:$rs1,
(vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
def : Pat<(riscv_vwsll_vl
(vti.Vector vti.RegClass:$rs2),
(vti.Vector (Low8BitsSplatPat (XLenVT GPR:$rs1))),
- (wti.Vector wti.RegClass:$pt),
+ (wti.Vector wti.RegClass:$passthru),
(vti.Mask V0), VLOpFrag),
(!cast<Instruction>("PseudoVWSLL_VX_"#vti.LMul.MX#"_MASK")
- wti.RegClass:$pt, vti.RegClass:$rs2, GPR:$rs1,
+ wti.RegClass:$passthru, vti.RegClass:$rs2, GPR:$rs1,
(vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
def : Pat<(riscv_vwsll_vl
(vti.Vector vti.RegClass:$rs2),
(vti.Vector (SplatPat_uimm5 uimm5:$rs1)),
- (wti.Vector wti.RegClass:$pt),
+ (wti.Vector wti.RegClass:$passthru),
(vti.Mask V0), VLOpFrag),
(!cast<Instruction>("PseudoVWSLL_VI_"#vti.LMul.MX#"_MASK")
- wti.RegClass:$pt, vti.RegClass:$rs2, uimm5:$rs1,
+ wti.RegClass:$passthru, vti.RegClass:$rs2, uimm5:$rs1,
(vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
}
}
@@ -989,11 +989,11 @@ multiclass VPatBinaryV_VI_VROL<string intrinsic, string instruction,
!if(isSEWAware, instruction#"_VI_"#vti.LMul.MX#"_E"#vti.SEW,
instruction#"_VI_"#vti.LMul.MX));
let Predicates = GetVTypePredicates<vti>.Predicates in
- def : Pat<(vti.Vector (Intr (vti.Vector vti.RegClass:$pt),
+ def : Pat<(vti.Vector (Intr (vti.Vector vti.RegClass:$passthru),
(vti.Vector vti.RegClass:$rs2),
(XLenVT uimm6:$rs1),
VLOpFrag)),
- (Pseudo (vti.Vector vti.RegClass:$pt),
+ (Pseudo (vti.Vector vti.RegClass:$passthru),
(vti.Vector vti.RegClass:$rs2),
(InvRot64Imm uimm6:$rs1),
GPR:$vl, vti.Log2SEW, TU_MU)>;
@@ -1003,12 +1003,12 @@ multiclass VPatBinaryV_VI_VROL<string intrinsic, string instruction,
!if(isSEWAware, instruction#"_VI_"#vti.LMul.MX#"_E"#vti.SEW#"_MASK",
instruction#"_VI_"#vti.LMul.MX#"_MASK"));
let Predicates = GetVTypePredicates<vti>.Predicates in
- def : Pat<(vti.Vector (IntrMask (vti.Vector vti.RegClass:$pt),
+ def : Pat<(vti.Vector (IntrMask (vti.Vector vti.RegClass:$passthru),
(vti.Vector vti.RegClass:$rs2),
(XLenVT uimm6:$rs1),
(vti.Mask V0),
VLOpFrag, (XLenVT timm:$policy))),
- (PseudoMask (vti.Vector vti.RegClass:$pt),
+ (PseudoMask (vti.Vector vti.RegClass:$passthru),
(vti.Vector vti.RegClass:$rs2),
(InvRot64Imm uimm6:$rs1),
(vti.Mask V0),