[llvm] [RISCV] Move the RISCVII namespaced enums into RISCVVType namespace in RISCVTargetParser.h. NFC (PR #127585)
Craig Topper via llvm-commits
llvm-commits at lists.llvm.org
Tue Feb 18 07:31:27 PST 2025
https://github.com/topperc updated https://github.com/llvm/llvm-project/pull/127585
From 710ae505bd3377c61e6922192cf6aa2fc86593f3 Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Mon, 17 Feb 2025 22:20:17 -0800
Subject: [PATCH 1/2] [RISCV] Add policy operand to masked vector compare
pseudos. Remove ForceTailAgnostic. NFC
Add a policy operand so the tail-agnostic policy can be set explicitly
instead of relying on ForceTailAgnostic. The masked-to-unmasked
transforms had to be updated to drop the policy operand when converting
to unmasked.
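Not part of the patch, just context for review: the new policy operand
carries the same tail/mask policy encoding the other RVV pseudos already
use, where TAIL_AGNOSTIC = 1 and MASK_AGNOSTIC = 2 are OR-able bits. A
minimal standalone C++ sketch of how that immediate composes and decodes,
with the enum re-declared locally since this snippet isn't linked against
LLVM:

#include <cassert>
#include <cstdint>
#include <cstdio>

// Local stand-ins for the policy bits. In-tree these are
// RISCVII::TAIL_AGNOSTIC / RISCVII::MASK_AGNOSTIC (moved into
// RISCVVType by the second patch in this series).
enum : uint64_t {
  TAIL_UNDISTURBED_MASK_UNDISTURBED = 0, // "tu, mu"
  TAIL_AGNOSTIC = 1,                     // "ta"
  MASK_AGNOSTIC = 2,                     // "ma"
};

int main() {
  // The masked compare pseudos now take tail-agnostic, mask-undisturbed
  // ("ta, mu") as an explicit operand instead of the ForceTailAgnostic
  // TSFlags bit.
  uint64_t Policy = TAIL_AGNOSTIC;
  assert((Policy & TAIL_AGNOSTIC) && !(Policy & MASK_AGNOSTIC));
  std::printf("policy imm = %llu (%s, %s)\n", (unsigned long long)Policy,
              (Policy & TAIL_AGNOSTIC) ? "ta" : "tu",
              (Policy & MASK_AGNOSTIC) ? "ma" : "mu");
  return 0;
}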
---
.../Target/RISCV/MCTargetDesc/RISCVBaseInfo.h | 10 +------
llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp | 26 ++++++++++++-----
llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp | 5 ----
llvm/lib/Target/RISCV/RISCVInstrFormats.td | 29 +++++++++----------
.../Target/RISCV/RISCVInstrInfoVPseudos.td | 12 ++++----
.../Target/RISCV/RISCVInstrInfoVVLPatterns.td | 21 ++++++++------
llvm/lib/Target/RISCV/RISCVVectorPeephole.cpp | 11 +++++--
.../test/CodeGen/RISCV/rvv/vl-opt-op-info.mir | 12 ++++----
8 files changed, 64 insertions(+), 62 deletions(-)
diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h
index 2f4b569041a6f..e1e50ac2078a7 100644
--- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h
+++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h
@@ -65,13 +65,9 @@ enum {
VLMulShift = ConstraintShift + 3,
VLMulMask = 0b111 << VLMulShift,
- // Force a tail agnostic policy even this instruction has a tied destination.
- ForceTailAgnosticShift = VLMulShift + 3,
- ForceTailAgnosticMask = 1 << ForceTailAgnosticShift,
-
// Is this a _TIED vector pseudo instruction. For these instructions we
// shouldn't skip the tied operand when converting to MC instructions.
- IsTiedPseudoShift = ForceTailAgnosticShift + 1,
+ IsTiedPseudoShift = VLMulShift + 3,
IsTiedPseudoMask = 1 << IsTiedPseudoShift,
// Does this instruction have a SEW operand. It will be the last explicit
@@ -148,10 +144,6 @@ static inline unsigned getFormat(uint64_t TSFlags) {
static inline VLMUL getLMul(uint64_t TSFlags) {
return static_cast<VLMUL>((TSFlags & VLMulMask) >> VLMulShift);
}
-/// \returns true if tail agnostic is enforced for the instruction.
-static inline bool doesForceTailAgnostic(uint64_t TSFlags) {
- return TSFlags & ForceTailAgnosticMask;
-}
/// \returns true if this a _TIED pseudo.
static inline bool isTiedPseudo(uint64_t TSFlags) {
return TSFlags & IsTiedPseudoMask;
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
index fb2c5c62ef871..72b60439ca840 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -1838,13 +1838,16 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
return;
}
+ SDValue PolicyOp =
+ CurDAG->getTargetConstant(RISCVII::TAIL_AGNOSTIC, DL, XLenVT);
+
if (IsCmpConstant) {
SDValue Imm =
selectImm(CurDAG, SDLoc(Src2), XLenVT, CVal - 1, *Subtarget);
ReplaceNode(Node, CurDAG->getMachineNode(
VMSGTMaskOpcode, DL, VT,
- {MaskedOff, Src1, Imm, Mask, VL, SEW}));
+ {MaskedOff, Src1, Imm, Mask, VL, SEW, PolicyOp}));
return;
}
@@ -1853,10 +1856,10 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
// The result is mask undisturbed.
// We use the same instructions to emulate mask agnostic behavior, because
// the agnostic result can be either undisturbed or all 1.
- SDValue Cmp = SDValue(
- CurDAG->getMachineNode(VMSLTMaskOpcode, DL, VT,
- {MaskedOff, Src1, Src2, Mask, VL, SEW}),
- 0);
+ SDValue Cmp = SDValue(CurDAG->getMachineNode(VMSLTMaskOpcode, DL, VT,
+ {MaskedOff, Src1, Src2, Mask,
+ VL, SEW, PolicyOp}),
+ 0);
// vmxor.mm vd, vd, v0 is used to update active value.
ReplaceNode(Node, CurDAG->getMachineNode(VMXOROpcode, DL, VT,
{Cmp, Mask, VL, MaskSEW}));
@@ -3792,9 +3795,9 @@ bool RISCVDAGToDAGISel::doPeepholeMaskedRVV(MachineSDNode *N) {
const MCInstrDesc &MaskedMCID = TII->get(N->getMachineOpcode());
const bool MaskedHasPassthru = RISCVII::isFirstDefTiedToFirstUse(MaskedMCID);
- assert(RISCVII::hasVecPolicyOp(MaskedMCID.TSFlags) ==
- RISCVII::hasVecPolicyOp(MCID.TSFlags) &&
- "Masked and unmasked pseudos are inconsistent");
+ assert((RISCVII::hasVecPolicyOp(MaskedMCID.TSFlags) ||
+ !RISCVII::hasVecPolicyOp(MCID.TSFlags)) &&
+ "Unmasked pseudo has policy but masked pseudo doesn't?");
assert(RISCVII::hasVecPolicyOp(MCID.TSFlags) == HasPassthru &&
"Unexpected pseudo structure");
assert(!(HasPassthru && !MaskedHasPassthru) &&
@@ -3803,11 +3806,18 @@ bool RISCVDAGToDAGISel::doPeepholeMaskedRVV(MachineSDNode *N) {
SmallVector<SDValue, 8> Ops;
// Skip the passthru operand at index 0 if the unmasked don't have one.
bool ShouldSkip = !HasPassthru && MaskedHasPassthru;
+ bool DropPolicy = !RISCVII::hasVecPolicyOp(MCID.TSFlags) &&
+ RISCVII::hasVecPolicyOp(MaskedMCID.TSFlags);
+ bool HasChainOp =
+ N->getOperand(N->getNumOperands() - 1).getValueType() == MVT::Other;
+ unsigned LastOpNum = N->getNumOperands() - 1 - HasChainOp;
for (unsigned I = ShouldSkip, E = N->getNumOperands(); I != E; I++) {
// Skip the mask
SDValue Op = N->getOperand(I);
if (I == MaskOpIdx)
continue;
+ if (DropPolicy && I == LastOpNum)
+ continue;
Ops.push_back(Op);
}
diff --git a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
index 4a74906ed3cc3..ffc7e09368824 100644
--- a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
@@ -1049,11 +1049,6 @@ RISCVInsertVSETVLI::computeInfoForInstr(const MachineInstr &MI) const {
MaskAgnostic = Policy & RISCVII::MASK_AGNOSTIC;
}
- // Some pseudo instructions force a tail agnostic policy despite having a
- // tied def.
- if (RISCVII::doesForceTailAgnostic(TSFlags))
- TailAgnostic = true;
-
if (!RISCVII::usesMaskPolicy(TSFlags))
MaskAgnostic = true;
}
diff --git a/llvm/lib/Target/RISCV/RISCVInstrFormats.td b/llvm/lib/Target/RISCV/RISCVInstrFormats.td
index cea28bdce284c..47fe51bafd17c 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrFormats.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrFormats.td
@@ -193,36 +193,33 @@ class RVInstCommon<dag outs, dag ins, string opcodestr, string argstr,
bits<3> VLMul = 0;
let TSFlags{10-8} = VLMul;
- bit ForceTailAgnostic = false;
- let TSFlags{11} = ForceTailAgnostic;
-
bit IsTiedPseudo = 0;
- let TSFlags{12} = IsTiedPseudo;
+ let TSFlags{11} = IsTiedPseudo;
bit HasSEWOp = 0;
- let TSFlags{13} = HasSEWOp;
+ let TSFlags{12} = HasSEWOp;
bit HasVLOp = 0;
- let TSFlags{14} = HasVLOp;
+ let TSFlags{13} = HasVLOp;
bit HasVecPolicyOp = 0;
- let TSFlags{15} = HasVecPolicyOp;
+ let TSFlags{14} = HasVecPolicyOp;
bit IsRVVWideningReduction = 0;
- let TSFlags{16} = IsRVVWideningReduction;
+ let TSFlags{15} = IsRVVWideningReduction;
bit UsesMaskPolicy = 0;
- let TSFlags{17} = UsesMaskPolicy;
+ let TSFlags{16} = UsesMaskPolicy;
// Indicates that the result can be considered sign extended from bit 31. Some
// instructions with this flag aren't W instructions, but are either sign
// extended from a smaller size, always outputs a small integer, or put zeros
// in bits 63:31. Used by the SExtWRemoval pass.
bit IsSignExtendingOpW = 0;
- let TSFlags{18} = IsSignExtendingOpW;
+ let TSFlags{17} = IsSignExtendingOpW;
bit HasRoundModeOp = 0;
- let TSFlags{19} = HasRoundModeOp;
+ let TSFlags{18} = HasRoundModeOp;
// This is only valid when HasRoundModeOp is set to 1. HasRoundModeOp is set
// to 1 for vector fixed-point or floating-point intrinsics. This bit is
@@ -230,7 +227,7 @@ class RVInstCommon<dag outs, dag ins, string opcodestr, string argstr,
// fixed-point / floating-point instructions and emit appropriate read/write
// to the correct CSR.
bit UsesVXRM = 0;
- let TSFlags{20} = UsesVXRM;
+ let TSFlags{19} = UsesVXRM;
// Indicates whether these instructions can partially overlap between source
// registers and destination registers according to the vector spec.
@@ -239,19 +236,19 @@ class RVInstCommon<dag outs, dag ins, string opcodestr, string argstr,
// 2 -> narrowing case
// 3 -> widening case
bits<2> TargetOverlapConstraintType = 0;
- let TSFlags{22-21} = TargetOverlapConstraintType;
+ let TSFlags{21-20} = TargetOverlapConstraintType;
// Most vector instructions are elementwise, but some may depend on the value
// of VL (e.g. vslide1down.vx), and others may depend on the VL and mask
// (e.g. vredsum.vs, viota.m). Mark these instructions so that peepholes avoid
// changing their VL and/or mask.
EltDeps ElementsDependOn = EltDepsNone;
- let TSFlags{23} = ElementsDependOn.VL;
- let TSFlags{24} = ElementsDependOn.Mask;
+ let TSFlags{22} = ElementsDependOn.VL;
+ let TSFlags{23} = ElementsDependOn.Mask;
// Indicates the EEW of a vector instruction's destination operand.
EEW DestEEW = EEWSEWx1;
- let TSFlags{26-25} = DestEEW.Value;
+ let TSFlags{25-24} = DestEEW.Value;
}
class RVInst<dag outs, dag ins, string opcodestr, string argstr,
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index 33c04d1c05613..cc58cdf02e09c 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -1409,7 +1409,7 @@ class VPseudoTernaryMaskPolicyRoundingMode<VReg RetClass,
let hasPostISelHook = 1;
}
-// Like VPseudoBinaryMaskPolicy, but output can be V0 and there is no policy.
+// Like VPseudoBinaryMaskPolicy, but output can be V0.
class VPseudoBinaryMOutMask<VReg RetClass,
RegisterClass Op1Class,
DAGOperand Op2Class,
@@ -1418,7 +1418,7 @@ class VPseudoBinaryMOutMask<VReg RetClass,
Pseudo<(outs RetClass:$rd),
(ins RetClass:$passthru,
Op1Class:$rs2, Op2Class:$rs1,
- VMaskOp:$vm, AVL:$vl, sew:$sew), []>,
+ VMaskOp:$vm, AVL:$vl, sew:$sew, vec_policy:$policy), []>,
RISCVVPseudo {
let mayLoad = 0;
let mayStore = 0;
@@ -1427,6 +1427,7 @@ class VPseudoBinaryMOutMask<VReg RetClass,
let TargetOverlapConstraintType = TargetConstraintType;
let HasVLOp = 1;
let HasSEWOp = 1;
+ let HasVecPolicyOp = 1;
let UsesMaskPolicy = 1;
}
@@ -2622,7 +2623,6 @@ multiclass VPseudoBinaryM<DAGOperand Op2Class, LMULInfo m, bit Commutable = 0> {
VPseudoBinaryNoMask<VR, m.vrclass, Op2Class,
!if(!ge(m.octuple, 16), "@earlyclobber $rd", ""),
TargetConstraintType = 2>;
- let ForceTailAgnostic = true in
def "_" # m.MX # "_MASK" :
VPseudoBinaryMOutMask<VR, m.vrclass, Op2Class,
!if(!ge(m.octuple, 16), "@earlyclobber $rd", ""),
@@ -4140,7 +4140,7 @@ class VPatBinaryMask<string intrinsic_name,
(result_type result_reg_class:$passthru),
(op1_type op1_reg_class:$rs1),
(op2_type op2_kind:$rs2),
- (mask_type VMV0:$vm), GPR:$vl, sew)>;
+ (mask_type VMV0:$vm), GPR:$vl, sew, TA_MU)>;
class VPatBinaryMaskPolicy<string intrinsic_name,
string inst,
@@ -4210,7 +4210,7 @@ class VPatBinaryMaskSwapped<string intrinsic_name,
(result_type result_reg_class:$passthru),
(op1_type op1_reg_class:$rs1),
(op2_type op2_kind:$rs2),
- (mask_type VMV0:$vm), GPR:$vl, sew)>;
+ (mask_type VMV0:$vm), GPR:$vl, sew, TA_MU)>;
class VPatTiedBinaryNoMask<string intrinsic_name,
string inst,
@@ -6013,7 +6013,7 @@ multiclass VPatCompare_VI<string intrinsic, string inst,
(vti.Mask VMV0:$vm),
VLOpFrag)),
(PseudoMask VR:$passthru, vti.RegClass:$rs1, (DecImm ImmType:$rs2),
- (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW)>;
+ (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW, TA_MU)>;
}
}
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
index ffa3d3982647d..43cfc9d1e77ca 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
@@ -1020,7 +1020,7 @@ multiclass VPatIntegerSetCCVL_VV<VTypeInfo vti, string instruction_name,
VR:$passthru,
vti.RegClass:$rs1,
vti.RegClass:$rs2,
- (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW)>;
+ (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW, TA_MU)>;
}
// Inherits from VPatIntegerSetCCVL_VV and adds a pattern with operands swapped.
@@ -1034,7 +1034,8 @@ multiclass VPatIntegerSetCCVL_VV_Swappable<VTypeInfo vti, string instruction_nam
VLOpFrag)),
(!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX#"_MASK")
VR:$passthru, vti.RegClass:$rs1,
- vti.RegClass:$rs2, (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW)>;
+ vti.RegClass:$rs2, (vti.Mask VMV0:$vm), GPR:$vl,
+ vti.Log2SEW, TA_MU)>;
}
multiclass VPatIntegerSetCCVL_VX_Swappable<VTypeInfo vti, string instruction_name,
@@ -1046,14 +1047,16 @@ multiclass VPatIntegerSetCCVL_VX_Swappable<VTypeInfo vti, string instruction_nam
(vti.Mask VMV0:$vm),
VLOpFrag)),
(instruction_masked VR:$passthru, vti.RegClass:$rs1,
- GPR:$rs2, (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW)>;
+ GPR:$rs2, (vti.Mask VMV0:$vm), GPR:$vl,
+ vti.Log2SEW, TA_MU)>;
def : Pat<(vti.Mask (riscv_setcc_vl (SplatPat (XLenVT GPR:$rs2)),
(vti.Vector vti.RegClass:$rs1), invcc,
VR:$passthru,
(vti.Mask VMV0:$vm),
VLOpFrag)),
(instruction_masked VR:$passthru, vti.RegClass:$rs1,
- GPR:$rs2, (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW)>;
+ GPR:$rs2, (vti.Mask VMV0:$vm), GPR:$vl,
+ vti.Log2SEW, TA_MU)>;
}
multiclass VPatIntegerSetCCVL_VI_Swappable<VTypeInfo vti, string instruction_name,
@@ -1067,7 +1070,7 @@ multiclass VPatIntegerSetCCVL_VI_Swappable<VTypeInfo vti, string instruction_nam
VLOpFrag)),
(instruction_masked VR:$passthru, vti.RegClass:$rs1,
XLenVT:$rs2, (vti.Mask VMV0:$vm), GPR:$vl,
- vti.Log2SEW)>;
+ vti.Log2SEW, TA_MU)>;
// FIXME: Can do some canonicalization to remove these patterns.
def : Pat<(vti.Mask (riscv_setcc_vl (splatpat_kind simm5:$rs2),
@@ -1077,7 +1080,7 @@ multiclass VPatIntegerSetCCVL_VI_Swappable<VTypeInfo vti, string instruction_nam
VLOpFrag)),
(instruction_masked VR:$passthru, vti.RegClass:$rs1,
simm5:$rs2, (vti.Mask VMV0:$vm), GPR:$vl,
- vti.Log2SEW)>;
+ vti.Log2SEW, TA_MU)>;
}
multiclass VPatFPSetCCVL_VV_VF_FV<SDPatternOperator vop, CondCode cc,
@@ -1094,7 +1097,7 @@ multiclass VPatFPSetCCVL_VV_VF_FV<SDPatternOperator vop, CondCode cc,
(!cast<Instruction>(inst_name#"_VV_"#fvti.LMul.MX#"_MASK")
VR:$passthru, fvti.RegClass:$rs1,
fvti.RegClass:$rs2, (fvti.Mask VMV0:$vm),
- GPR:$vl, fvti.Log2SEW)>;
+ GPR:$vl, fvti.Log2SEW, TA_MU)>;
def : Pat<(fvti.Mask (vop (fvti.Vector fvti.RegClass:$rs1),
(SplatFPOp fvti.ScalarRegClass:$rs2),
cc,
@@ -1104,7 +1107,7 @@ multiclass VPatFPSetCCVL_VV_VF_FV<SDPatternOperator vop, CondCode cc,
(!cast<Instruction>(inst_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_MASK")
VR:$passthru, fvti.RegClass:$rs1,
fvti.ScalarRegClass:$rs2, (fvti.Mask VMV0:$vm),
- GPR:$vl, fvti.Log2SEW)>;
+ GPR:$vl, fvti.Log2SEW, TA_MU)>;
def : Pat<(fvti.Mask (vop (SplatFPOp fvti.ScalarRegClass:$rs2),
(fvti.Vector fvti.RegClass:$rs1),
cc,
@@ -1114,7 +1117,7 @@ multiclass VPatFPSetCCVL_VV_VF_FV<SDPatternOperator vop, CondCode cc,
(!cast<Instruction>(swapped_op_inst_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_MASK")
VR:$passthru, fvti.RegClass:$rs1,
fvti.ScalarRegClass:$rs2, (fvti.Mask VMV0:$vm),
- GPR:$vl, fvti.Log2SEW)>;
+ GPR:$vl, fvti.Log2SEW, TA_MU)>;
}
}
}
diff --git a/llvm/lib/Target/RISCV/RISCVVectorPeephole.cpp b/llvm/lib/Target/RISCV/RISCVVectorPeephole.cpp
index a4e7219c39f37..5ef1c9444f59a 100644
--- a/llvm/lib/Target/RISCV/RISCVVectorPeephole.cpp
+++ b/llvm/lib/Target/RISCV/RISCVVectorPeephole.cpp
@@ -466,9 +466,9 @@ bool RISCVVectorPeephole::convertToUnmasked(MachineInstr &MI) const {
RISCVII::hasVecPolicyOp(MCID.TSFlags);
const bool HasPassthru = RISCVII::isFirstDefTiedToFirstUse(MCID);
const MCInstrDesc &MaskedMCID = TII->get(MI.getOpcode());
- assert(RISCVII::hasVecPolicyOp(MaskedMCID.TSFlags) ==
- RISCVII::hasVecPolicyOp(MCID.TSFlags) &&
- "Masked and unmasked pseudos are inconsistent");
+ assert((RISCVII::hasVecPolicyOp(MaskedMCID.TSFlags) ||
+ !RISCVII::hasVecPolicyOp(MCID.TSFlags)) &&
+ "Unmasked pseudo has policy but masked pseudo doesn't?");
assert(HasPolicyOp == HasPassthru && "Unexpected pseudo structure");
assert(!(HasPassthru && !RISCVII::isFirstDefTiedToFirstUse(MaskedMCID)) &&
"Unmasked with passthru but masked with no passthru?");
@@ -476,6 +476,11 @@ bool RISCVVectorPeephole::convertToUnmasked(MachineInstr &MI) const {
MI.setDesc(MCID);
+ // Drop the policy operand if unmasked doesn't need it.
+ if (RISCVII::hasVecPolicyOp(MaskedMCID.TSFlags) &&
+ !RISCVII::hasVecPolicyOp(MCID.TSFlags))
+ MI.removeOperand(RISCVII::getVecPolicyOpNum(MaskedMCID));
+
// TODO: Increment all MaskOpIdxs in tablegen by num of explicit defs?
unsigned MaskOpIdx = I->MaskOpIdx + MI.getNumExplicitDefs();
MI.removeOperand(MaskOpIdx);
diff --git a/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir b/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir
index d2906c4613295..c84f7735b66d4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir
@@ -1116,10 +1116,10 @@ body: |
bb.0:
; CHECK-LABEL: name: vmop_vv_passthru_use
; CHECK: %x:vrnov0 = PseudoVMAND_MM_B8 $noreg, $noreg, 1, 0 /* e8 */
- ; CHECK-NEXT: %y:vrnov0 = PseudoVMSEQ_VV_M1_MASK %x, $noreg, $noreg, $noreg, 1, 3 /* e8 */
+ ; CHECK-NEXT: %y:vrnov0 = PseudoVMSEQ_VV_M1_MASK %x, $noreg, $noreg, $noreg, 1, 3 /* e8 */, 1
; CHECK-NEXT: %z:vr = PseudoVMAND_MM_B8 %y, $noreg, 1, 0 /* e8 */
%x:vrnov0 = PseudoVMAND_MM_B8 $noreg, $noreg, -1, 0 /* e1 */
- %y:vrnov0 = PseudoVMSEQ_VV_M1_MASK %x, $noreg, $noreg, $noreg, 1, 3 /* e8 */
+ %y:vrnov0 = PseudoVMSEQ_VV_M1_MASK %x, $noreg, $noreg, $noreg, 1, 3 /* e8 */, 1
%z:vr = PseudoVMAND_MM_B8 %y, $noreg, 1, 0 /* e1 */
...
---
@@ -1128,10 +1128,10 @@ body: |
bb.0:
; CHECK-LABEL: name: vmop_vv_passthru_use_incompatible_eew
; CHECK: %x:vrnov0 = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
- ; CHECK-NEXT: %y:vrnov0 = PseudoVMSEQ_VV_M1_MASK %x, $noreg, $noreg, $noreg, 1, 3 /* e8 */
+ ; CHECK-NEXT: %y:vrnov0 = PseudoVMSEQ_VV_M1_MASK %x, $noreg, $noreg, $noreg, 1, 3 /* e8 */, 1
; CHECK-NEXT: %z:vr = PseudoVMAND_MM_B8 %y, $noreg, 1, 0 /* e8 */
%x:vrnov0 = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
- %y:vrnov0 = PseudoVMSEQ_VV_M1_MASK %x, $noreg, $noreg, $noreg, 1, 3 /* e8 */
+ %y:vrnov0 = PseudoVMSEQ_VV_M1_MASK %x, $noreg, $noreg, $noreg, 1, 3 /* e8 */, 1
%z:vr = PseudoVMAND_MM_B8 %y, $noreg, 1, 0 /* e1 */
...
---
@@ -1140,10 +1140,10 @@ body: |
bb.0:
; CHECK-LABEL: name: vmop_vv_passthru_use_incompatible_emul
; CHECK: %x:vrnov0 = PseudoVMAND_MM_B16 $noreg, $noreg, -1, 0 /* e8 */
- ; CHECK-NEXT: %y:vrnov0 = PseudoVMSEQ_VV_M1_MASK %x, $noreg, $noreg, $noreg, 1, 3 /* e8 */
+ ; CHECK-NEXT: %y:vrnov0 = PseudoVMSEQ_VV_M1_MASK %x, $noreg, $noreg, $noreg, 1, 3 /* e8 */, 1
; CHECK-NEXT: %z:vr = PseudoVMAND_MM_B8 %y, $noreg, 1, 0 /* e8 */
%x:vrnov0 = PseudoVMAND_MM_B16 $noreg, $noreg, -1, 0 /* e1 */
- %y:vrnov0 = PseudoVMSEQ_VV_M1_MASK %x, $noreg, $noreg, $noreg, 1, 3 /* e8 */
+ %y:vrnov0 = PseudoVMSEQ_VV_M1_MASK %x, $noreg, $noreg, $noreg, 1, 3 /* e8 */, 1
%z:vr = PseudoVMAND_MM_B8 %y, $noreg, 1, 0 /* e1 */
...
---
From 53ffb4daed31dc9f557dd64ea254d1fd35152a22 Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Mon, 17 Feb 2025 23:17:26 -0800
Subject: [PATCH 2/2] [RISCV] Move the RISCVII namespaced enums into RISCVVType
namespace in RISCVTargetParser.h. NFC
The VLMUL and policy enums originally lived in RISCVBaseInfo.h in the
backend, which is where everything else in the RISCVII namespace is
defined. RISCVTargetParser.h is used by much more of the compiler, and
it doesn't really make sense to expose two different namespaces from it.
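Since the enumerators keep their values, the move is a pure spelling
change at use sites (RISCVII::VLMUL::LMUL_2 becomes RISCVVType::LMUL_2).
A standalone C++ sketch of the new spelling, with the enum re-declared
locally because this snippet isn't linked against LLVM:

#include <cstdint>
#include <cstdio>

// Local copy of the enum for illustration. In-tree it now lives in
// llvm::RISCVVType (previously llvm::RISCVII) in RISCVTargetParser.h.
namespace RISCVVType {
enum VLMUL : uint8_t {
  LMUL_1 = 0,
  LMUL_2,
  LMUL_4,
  LMUL_8,
  LMUL_RESERVED,
  LMUL_F8,
  LMUL_F4,
  LMUL_F2,
};
} // namespace RISCVVType

int main() {
  // Old spelling: RISCVII::VLMUL::LMUL_2. New spelling below.
  RISCVVType::VLMUL LMul = RISCVVType::LMUL_2;
  std::printf("vlmul encoding = %u\n", static_cast<unsigned>(LMul));
  return 0;
}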
---
.../llvm/TargetParser/RISCVTargetParser.h | 23 ++-
llvm/lib/Analysis/ValueTracking.cpp | 2 +-
.../Target/RISCV/AsmParser/RISCVAsmParser.cpp | 2 +-
.../Target/RISCV/GISel/RISCVLegalizerInfo.cpp | 8 +-
.../Target/RISCV/MCA/RISCVCustomBehaviour.cpp | 22 +--
.../Target/RISCV/MCTargetDesc/RISCVBaseInfo.h | 4 +-
.../RISCV/MCTargetDesc/RISCVInstPrinter.cpp | 2 +-
llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp | 57 ++++----
llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 136 +++++++++---------
llvm/lib/Target/RISCV/RISCVISelLowering.h | 4 +-
llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp | 24 ++--
llvm/lib/Target/RISCV/RISCVInstrInfo.cpp | 37 ++---
llvm/lib/Target/RISCV/RISCVRegisterInfo.h | 5 +-
.../Target/RISCV/RISCVTargetTransformInfo.cpp | 8 +-
llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp | 6 +-
.../RISCV/RISCVVectorMaskDAGMutation.cpp | 2 +-
llvm/lib/Target/RISCV/RISCVVectorPeephole.cpp | 12 +-
llvm/lib/TargetParser/RISCVTargetParser.cpp | 35 +++--
.../TargetParser/RISCVTargetParserTest.cpp | 24 ++--
19 files changed, 207 insertions(+), 206 deletions(-)
diff --git a/llvm/include/llvm/TargetParser/RISCVTargetParser.h b/llvm/include/llvm/TargetParser/RISCVTargetParser.h
index c237e1ddd6b38..b13a94cd56f2e 100644
--- a/llvm/include/llvm/TargetParser/RISCVTargetParser.h
+++ b/llvm/include/llvm/TargetParser/RISCVTargetParser.h
@@ -65,7 +65,7 @@ CPUModel getCPUModel(StringRef CPU);
} // namespace RISCV
-namespace RISCVII {
+namespace RISCVVType {
enum VLMUL : uint8_t {
LMUL_1 = 0,
LMUL_2,
@@ -82,9 +82,7 @@ enum {
TAIL_AGNOSTIC = 1,
MASK_AGNOSTIC = 2,
};
-} // namespace RISCVII
-namespace RISCVVType {
// Is this a SEW value that can be encoded into the VTYPE format.
inline static bool isValidSEW(unsigned SEW) {
return isPowerOf2_32(SEW) && SEW >= 8 && SEW <= 64;
@@ -95,21 +93,21 @@ inline static bool isValidLMUL(unsigned LMUL, bool Fractional) {
return isPowerOf2_32(LMUL) && LMUL <= 8 && (!Fractional || LMUL != 1);
}
-unsigned encodeVTYPE(RISCVII::VLMUL VLMUL, unsigned SEW, bool TailAgnostic,
+unsigned encodeVTYPE(VLMUL VLMUL, unsigned SEW, bool TailAgnostic,
bool MaskAgnostic);
-inline static RISCVII::VLMUL getVLMUL(unsigned VType) {
- unsigned VLMUL = VType & 0x7;
- return static_cast<RISCVII::VLMUL>(VLMUL);
+inline static VLMUL getVLMUL(unsigned VType) {
+ unsigned VLMul = VType & 0x7;
+ return static_cast<VLMUL>(VLMul);
}
// Decode VLMUL into 1,2,4,8 and fractional indicator.
-std::pair<unsigned, bool> decodeVLMUL(RISCVII::VLMUL VLMUL);
+std::pair<unsigned, bool> decodeVLMUL(VLMUL VLMul);
-inline static RISCVII::VLMUL encodeLMUL(unsigned LMUL, bool Fractional) {
+inline static VLMUL encodeLMUL(unsigned LMUL, bool Fractional) {
assert(isValidLMUL(LMUL, Fractional) && "Unsupported LMUL");
unsigned LmulLog2 = Log2_32(LMUL);
- return static_cast<RISCVII::VLMUL>(Fractional ? 8 - LmulLog2 : LmulLog2);
+ return static_cast<VLMUL>(Fractional ? 8 - LmulLog2 : LmulLog2);
}
inline static unsigned decodeVSEW(unsigned VSEW) {
@@ -133,10 +131,9 @@ inline static bool isMaskAgnostic(unsigned VType) { return VType & 0x80; }
void printVType(unsigned VType, raw_ostream &OS);
-unsigned getSEWLMULRatio(unsigned SEW, RISCVII::VLMUL VLMul);
+unsigned getSEWLMULRatio(unsigned SEW, VLMUL VLMul);
-std::optional<RISCVII::VLMUL>
-getSameRatioLMUL(unsigned SEW, RISCVII::VLMUL VLMUL, unsigned EEW);
+std::optional<VLMUL> getSameRatioLMUL(unsigned SEW, VLMUL VLMUL, unsigned EEW);
} // namespace RISCVVType
} // namespace llvm
diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
index 91a5f194db9dc..e3e026f7979da 100644
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -1984,7 +1984,7 @@ static void computeKnownBitsFromOperator(const Operator *I,
const ConstantRange Range = getVScaleRange(II->getFunction(), BitWidth);
uint64_t SEW = RISCVVType::decodeVSEW(
cast<ConstantInt>(II->getArgOperand(HasAVL))->getZExtValue());
- RISCVII::VLMUL VLMUL = static_cast<RISCVII::VLMUL>(
+ RISCVVType::VLMUL VLMUL = static_cast<RISCVVType::VLMUL>(
cast<ConstantInt>(II->getArgOperand(1 + HasAVL))->getZExtValue());
uint64_t MaxVLEN =
Range.getUnsignedMax().getZExtValue() * RISCV::RVVBitsPerBlock;
diff --git a/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp b/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp
index ac87d72b7595c..6d4466b7abf53 100644
--- a/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp
+++ b/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp
@@ -2311,7 +2311,7 @@ ParseStatus RISCVAsmParser::parseVTypeI(OperandVector &Operands) {
}
if (getLexer().is(AsmToken::EndOfStatement) && State == VTypeState_Done) {
- RISCVII::VLMUL VLMUL = RISCVVType::encodeLMUL(Lmul, Fractional);
+ RISCVVType::VLMUL VLMUL = RISCVVType::encodeLMUL(Lmul, Fractional);
if (Fractional) {
unsigned ELEN = STI->hasFeature(RISCV::FeatureStdExtZve64x) ? 64 : 32;
unsigned MaxSEW = ELEN / Lmul;
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
index 6f0645965d737..56b1639143d8b 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
@@ -1120,7 +1120,7 @@ bool RISCVLegalizerInfo::legalizeExtractSubvector(MachineInstr &MI,
// divide exactly.
assert(
RISCVVType::decodeVLMUL(RISCVTargetLowering::getLMUL(LitTyMVT)).second ||
- RISCVTargetLowering::getLMUL(LitTyMVT) == RISCVII::VLMUL::LMUL_1);
+ RISCVTargetLowering::getLMUL(LitTyMVT) == RISCVVType::LMUL_1);
// If the vector type is an LMUL-group type, extract a subvector equal to the
// nearest full vector register type.
@@ -1143,7 +1143,7 @@ bool RISCVLegalizerInfo::legalizeExtractSubvector(MachineInstr &MI,
const LLT XLenTy(STI.getXLenVT());
auto SlidedownAmt = MIB.buildVScale(XLenTy, RemIdx);
auto [Mask, VL] = buildDefaultVLOps(LitTy, MIB, MRI);
- uint64_t Policy = RISCVII::TAIL_AGNOSTIC | RISCVII::MASK_AGNOSTIC;
+ uint64_t Policy = RISCVVType::TAIL_AGNOSTIC | RISCVVType::MASK_AGNOSTIC;
auto Slidedown = MIB.buildInstr(
RISCV::G_VSLIDEDOWN_VL, {InterLitTy},
{MIB.buildUndef(InterLitTy), Vec, SlidedownAmt, Mask, VL, Policy});
@@ -1265,10 +1265,10 @@ bool RISCVLegalizerInfo::legalizeInsertSubvector(MachineInstr &MI,
// Use tail agnostic policy if we're inserting over InterLitTy's tail.
ElementCount EndIndex =
ElementCount::getScalable(RemIdx) + LitTy.getElementCount();
- uint64_t Policy = RISCVII::TAIL_UNDISTURBED_MASK_UNDISTURBED;
+ uint64_t Policy = RISCVVType::TAIL_UNDISTURBED_MASK_UNDISTURBED;
if (STI.expandVScale(EndIndex) ==
STI.expandVScale(InterLitTy.getElementCount()))
- Policy = RISCVII::TAIL_AGNOSTIC;
+ Policy = RISCVVType::TAIL_AGNOSTIC;
Inserted =
MIB.buildInstr(RISCV::G_VSLIDEUP_VL, {InsertedDst},
diff --git a/llvm/lib/Target/RISCV/MCA/RISCVCustomBehaviour.cpp b/llvm/lib/Target/RISCV/MCA/RISCVCustomBehaviour.cpp
index fb0dc482e6081..0881de90700ab 100644
--- a/llvm/lib/Target/RISCV/MCA/RISCVCustomBehaviour.cpp
+++ b/llvm/lib/Target/RISCV/MCA/RISCVCustomBehaviour.cpp
@@ -107,32 +107,32 @@ RISCVInstrumentManager::createInstruments(const MCInst &Inst) {
LLVM_DEBUG(dbgs() << "RVCB: Found VSETVLI and creating instrument for it: "
<< Inst << "\n");
unsigned VTypeI = Inst.getOperand(2).getImm();
- RISCVII::VLMUL VLMUL = RISCVVType::getVLMUL(VTypeI);
+ RISCVVType::VLMUL VLMUL = RISCVVType::getVLMUL(VTypeI);
StringRef LMUL;
switch (VLMUL) {
- case RISCVII::LMUL_1:
+ case RISCVVType::LMUL_1:
LMUL = "M1";
break;
- case RISCVII::LMUL_2:
+ case RISCVVType::LMUL_2:
LMUL = "M2";
break;
- case RISCVII::LMUL_4:
+ case RISCVVType::LMUL_4:
LMUL = "M4";
break;
- case RISCVII::LMUL_8:
+ case RISCVVType::LMUL_8:
LMUL = "M8";
break;
- case RISCVII::LMUL_F2:
+ case RISCVVType::LMUL_F2:
LMUL = "MF2";
break;
- case RISCVII::LMUL_F4:
+ case RISCVVType::LMUL_F4:
LMUL = "MF4";
break;
- case RISCVII::LMUL_F8:
+ case RISCVVType::LMUL_F8:
LMUL = "MF8";
break;
- case RISCVII::LMUL_RESERVED:
+ case RISCVVType::LMUL_RESERVED:
llvm_unreachable("Cannot create instrument for LMUL_RESERVED");
}
SmallVector<UniqueInstrument> Instruments;
@@ -166,7 +166,7 @@ RISCVInstrumentManager::createInstruments(const MCInst &Inst) {
}
static std::pair<uint8_t, uint8_t>
-getEEWAndEMUL(unsigned Opcode, RISCVII::VLMUL LMUL, uint8_t SEW) {
+getEEWAndEMUL(unsigned Opcode, RISCVVType::VLMUL LMUL, uint8_t SEW) {
uint8_t EEW;
switch (Opcode) {
case RISCV::VLM_V:
@@ -249,7 +249,7 @@ unsigned RISCVInstrumentManager::getSchedClassID(
const RISCVVInversePseudosTable::PseudoInfo *RVV = nullptr;
if (opcodeHasEEWAndEMULInfo(Opcode)) {
- RISCVII::VLMUL VLMUL = static_cast<RISCVII::VLMUL>(LMUL);
+ RISCVVType::VLMUL VLMUL = static_cast<RISCVVType::VLMUL>(LMUL);
auto [EEW, EMUL] = getEEWAndEMUL(Opcode, VLMUL, SEW);
RVV = RISCVVInversePseudosTable::getBaseInfo(Opcode, EMUL, EEW);
} else {
diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h
index e1e50ac2078a7..58eb48ed613df 100644
--- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h
+++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h
@@ -141,8 +141,8 @@ static inline unsigned getFormat(uint64_t TSFlags) {
return (TSFlags & InstFormatMask) >> InstFormatShift;
}
/// \returns the LMUL for the instruction.
-static inline VLMUL getLMul(uint64_t TSFlags) {
- return static_cast<VLMUL>((TSFlags & VLMulMask) >> VLMulShift);
+static inline RISCVVType::VLMUL getLMul(uint64_t TSFlags) {
+ return static_cast<RISCVVType::VLMUL>((TSFlags & VLMulMask) >> VLMulShift);
}
/// \returns true if this a _TIED pseudo.
static inline bool isTiedPseudo(uint64_t TSFlags) {
diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVInstPrinter.cpp b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVInstPrinter.cpp
index d5254719b3839..a4a40862a67c6 100644
--- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVInstPrinter.cpp
+++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVInstPrinter.cpp
@@ -210,7 +210,7 @@ void RISCVInstPrinter::printVTypeI(const MCInst *MI, unsigned OpNo,
unsigned Imm = MI->getOperand(OpNo).getImm();
// Print the raw immediate for reserved values: vlmul[2:0]=4, vsew[2:0]=0b1xx,
// or non-zero in bits 8 and above.
- if (RISCVVType::getVLMUL(Imm) == RISCVII::VLMUL::LMUL_RESERVED ||
+ if (RISCVVType::getVLMUL(Imm) == RISCVVType::VLMUL::LMUL_RESERVED ||
RISCVVType::getSEW(Imm) > 64 || (Imm >> 8) != 0) {
O << formatImm(Imm);
return;
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
index 72b60439ca840..7ea4bd94c0065 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -279,7 +279,7 @@ void RISCVDAGToDAGISel::addVectorLoadStoreOperands(
// none of the others do. All have passthru operands. For our pseudos,
// all loads have policy operands.
if (IsLoad) {
- uint64_t Policy = RISCVII::MASK_AGNOSTIC;
+ uint64_t Policy = RISCVVType::MASK_AGNOSTIC;
if (IsMasked)
Policy = Node->getConstantOperandVal(CurOp++);
SDValue PolicyOp = CurDAG->getTargetConstant(Policy, DL, XLenVT);
@@ -294,7 +294,7 @@ void RISCVDAGToDAGISel::selectVLSEG(SDNode *Node, unsigned NF, bool IsMasked,
SDLoc DL(Node);
MVT VT = Node->getSimpleValueType(0);
unsigned Log2SEW = Node->getConstantOperandVal(Node->getNumOperands() - 1);
- RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
+ RISCVVType::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
unsigned CurOp = 2;
SmallVector<SDValue, 8> Operands;
@@ -324,7 +324,7 @@ void RISCVDAGToDAGISel::selectVLSEGFF(SDNode *Node, unsigned NF,
MVT VT = Node->getSimpleValueType(0);
MVT XLenVT = Subtarget->getXLenVT();
unsigned Log2SEW = Node->getConstantOperandVal(Node->getNumOperands() - 1);
- RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
+ RISCVVType::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
unsigned CurOp = 2;
SmallVector<SDValue, 7> Operands;
@@ -355,7 +355,7 @@ void RISCVDAGToDAGISel::selectVLXSEG(SDNode *Node, unsigned NF, bool IsMasked,
SDLoc DL(Node);
MVT VT = Node->getSimpleValueType(0);
unsigned Log2SEW = Node->getConstantOperandVal(Node->getNumOperands() - 1);
- RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
+ RISCVVType::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
unsigned CurOp = 2;
SmallVector<SDValue, 8> Operands;
@@ -379,7 +379,7 @@ void RISCVDAGToDAGISel::selectVLXSEG(SDNode *Node, unsigned NF, bool IsMasked,
"Element count mismatch");
#endif
- RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
+ RISCVVType::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
report_fatal_error("The V extension does not support EEW=64 for index "
@@ -404,7 +404,7 @@ void RISCVDAGToDAGISel::selectVSSEG(SDNode *Node, unsigned NF, bool IsMasked,
SDLoc DL(Node);
MVT VT = Node->getOperand(2)->getSimpleValueType(0);
unsigned Log2SEW = Node->getConstantOperandVal(Node->getNumOperands() - 1);
- RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
+ RISCVVType::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
unsigned CurOp = 2;
SmallVector<SDValue, 8> Operands;
@@ -430,7 +430,7 @@ void RISCVDAGToDAGISel::selectVSXSEG(SDNode *Node, unsigned NF, bool IsMasked,
SDLoc DL(Node);
MVT VT = Node->getOperand(2)->getSimpleValueType(0);
unsigned Log2SEW = Node->getConstantOperandVal(Node->getNumOperands() - 1);
- RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
+ RISCVVType::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
unsigned CurOp = 2;
SmallVector<SDValue, 8> Operands;
@@ -454,7 +454,7 @@ void RISCVDAGToDAGISel::selectVSXSEG(SDNode *Node, unsigned NF, bool IsMasked,
"Element count mismatch");
#endif
- RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
+ RISCVVType::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
report_fatal_error("The V extension does not support EEW=64 for index "
@@ -495,7 +495,7 @@ void RISCVDAGToDAGISel::selectVSETVLI(SDNode *Node) {
unsigned SEW =
RISCVVType::decodeVSEW(Node->getConstantOperandVal(Offset) & 0x7);
- RISCVII::VLMUL VLMul = static_cast<RISCVII::VLMUL>(
+ RISCVVType::VLMUL VLMul = static_cast<RISCVVType::VLMUL>(
Node->getConstantOperandVal(Offset + 1) & 0x7);
unsigned VTypeI = RISCVVType::encodeVTYPE(VLMul, SEW, /*TailAgnostic*/ true,
@@ -1672,7 +1672,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
default:
llvm_unreachable("Unexpected LMUL!");
#define CASE_VMSLT_OPCODES(lmulenum, suffix) \
- case RISCVII::VLMUL::lmulenum: \
+ case RISCVVType::lmulenum: \
VMSLTOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix \
: RISCV::PseudoVMSLT_VX_##suffix; \
VMSGTOpcode = IsUnsigned ? RISCV::PseudoVMSGTU_VX_##suffix \
@@ -1692,7 +1692,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
default:
llvm_unreachable("Unexpected LMUL!");
#define CASE_VMNAND_VMSET_OPCODES(lmulenum, suffix) \
- case RISCVII::VLMUL::lmulenum: \
+ case RISCVVType::lmulenum: \
VMNANDOpcode = RISCV::PseudoVMNAND_MM_##suffix; \
VMSetOpcode = RISCV::PseudoVMSET_M_##suffix; \
break;
@@ -1768,7 +1768,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
default:
llvm_unreachable("Unexpected LMUL!");
#define CASE_VMSLT_OPCODES(lmulenum, suffix) \
- case RISCVII::VLMUL::lmulenum: \
+ case RISCVVType::lmulenum: \
VMSLTOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix \
: RISCV::PseudoVMSLT_VX_##suffix; \
VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix##_MASK \
@@ -1790,7 +1790,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
default:
llvm_unreachable("Unexpected LMUL!");
#define CASE_VMXOR_VMANDN_VMOR_OPCODES(lmulenum, suffix) \
- case RISCVII::VLMUL::lmulenum: \
+ case RISCVVType::lmulenum: \
VMXOROpcode = RISCV::PseudoVMXOR_MM_##suffix; \
VMANDNOpcode = RISCV::PseudoVMANDN_MM_##suffix; \
VMOROpcode = RISCV::PseudoVMOR_MM_##suffix; \
@@ -1839,7 +1839,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
}
SDValue PolicyOp =
- CurDAG->getTargetConstant(RISCVII::TAIL_AGNOSTIC, DL, XLenVT);
+ CurDAG->getTargetConstant(RISCVVType::TAIL_AGNOSTIC, DL, XLenVT);
if (IsCmpConstant) {
SDValue Imm =
@@ -2005,8 +2005,8 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
"Element count mismatch");
- RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
- RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
+ RISCVVType::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
+ RISCVVType::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
report_fatal_error("The V extension does not support EEW=64 for index "
@@ -2058,7 +2058,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided,
Operands, /*IsLoad=*/true);
- RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
+ RISCVVType::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
const RISCV::VLEPseudo *P =
RISCV::getVLEPseudo(IsMasked, IsStrided, /*FF*/ false, Log2SEW,
static_cast<unsigned>(LMUL));
@@ -2085,7 +2085,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
/*IsStridedOrIndexed*/ false, Operands,
/*IsLoad=*/true);
- RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
+ RISCVVType::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
const RISCV::VLEPseudo *P =
RISCV::getVLEPseudo(IsMasked, /*Strided*/ false, /*FF*/ true,
Log2SEW, static_cast<unsigned>(LMUL));
@@ -2211,8 +2211,8 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
"Element count mismatch");
- RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
- RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
+ RISCVVType::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
+ RISCVVType::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
report_fatal_error("The V extension does not support EEW=64 for index "
@@ -2250,7 +2250,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided,
Operands);
- RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
+ RISCVVType::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
const RISCV::VSEPseudo *P = RISCV::getVSEPseudo(
IsMasked, IsStrided, Log2SEW, static_cast<unsigned>(LMUL));
MachineSDNode *Store =
@@ -2317,11 +2317,12 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
if (Idx != 0)
break;
- RISCVII::VLMUL SubVecLMUL = RISCVTargetLowering::getLMUL(SubVecContainerVT);
+ RISCVVType::VLMUL SubVecLMUL =
+ RISCVTargetLowering::getLMUL(SubVecContainerVT);
[[maybe_unused]] bool IsSubVecPartReg =
- SubVecLMUL == RISCVII::VLMUL::LMUL_F2 ||
- SubVecLMUL == RISCVII::VLMUL::LMUL_F4 ||
- SubVecLMUL == RISCVII::VLMUL::LMUL_F8;
+ SubVecLMUL == RISCVVType::VLMUL::LMUL_F2 ||
+ SubVecLMUL == RISCVVType::VLMUL::LMUL_F4 ||
+ SubVecLMUL == RISCVVType::VLMUL::LMUL_F8;
assert((V.getValueType().isRISCVVectorTuple() || !IsSubVecPartReg ||
V.isUndef()) &&
"Expecting lowering to have created legal INSERT_SUBVECTORs when "
@@ -2442,11 +2443,11 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
Ld->getBasePtr()};
if (IsStrided)
Operands.push_back(CurDAG->getRegister(RISCV::X0, XLenVT));
- uint64_t Policy = RISCVII::MASK_AGNOSTIC | RISCVII::TAIL_AGNOSTIC;
+ uint64_t Policy = RISCVVType::MASK_AGNOSTIC | RISCVVType::TAIL_AGNOSTIC;
SDValue PolicyOp = CurDAG->getTargetConstant(Policy, DL, XLenVT);
Operands.append({VL, SEW, PolicyOp, Ld->getChain()});
- RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
+ RISCVVType::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
const RISCV::VLEPseudo *P = RISCV::getVLEPseudo(
/*IsMasked*/ false, IsStrided, /*FF*/ false,
Log2SEW, static_cast<unsigned>(LMUL));
@@ -3985,7 +3986,7 @@ bool RISCVDAGToDAGISel::performCombineVMergeAndVOps(SDNode *N) {
// preserve them.
bool MergeVLShrunk = VL != OrigVL;
uint64_t Policy = (isImplicitDef(Passthru) && !MergeVLShrunk)
- ? RISCVII::TAIL_AGNOSTIC
+ ? RISCVVType::TAIL_AGNOSTIC
: /*TUMU*/ 0;
SDValue PolicyOp =
CurDAG->getTargetConstant(Policy, DL, Subtarget->getXLenVT());
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index c40ab0d09bdf6..98c25bc93a8a2 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -1110,7 +1110,7 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
setOperationAction(FloatingPointLibCallOps, VT, Expand);
// Custom split nxv32[b]f16 since nxv32[b]f32 is not legal.
- if (getLMUL(VT) == RISCVII::VLMUL::LMUL_8) {
+ if (getLMUL(VT) == RISCVVType::LMUL_8) {
setOperationAction(ZvfhminZvfbfminPromoteOps, VT, Custom);
setOperationAction(ZvfhminZvfbfminPromoteVPOps, VT, Custom);
} else {
@@ -2361,25 +2361,25 @@ static void translateSetCCForBranch(const SDLoc &DL, SDValue &LHS, SDValue &RHS,
}
}
-RISCVII::VLMUL RISCVTargetLowering::getLMUL(MVT VT) {
+RISCVVType::VLMUL RISCVTargetLowering::getLMUL(MVT VT) {
if (VT.isRISCVVectorTuple()) {
if (VT.SimpleTy >= MVT::riscv_nxv1i8x2 &&
VT.SimpleTy <= MVT::riscv_nxv1i8x8)
- return RISCVII::LMUL_F8;
+ return RISCVVType::LMUL_F8;
if (VT.SimpleTy >= MVT::riscv_nxv2i8x2 &&
VT.SimpleTy <= MVT::riscv_nxv2i8x8)
- return RISCVII::LMUL_F4;
+ return RISCVVType::LMUL_F4;
if (VT.SimpleTy >= MVT::riscv_nxv4i8x2 &&
VT.SimpleTy <= MVT::riscv_nxv4i8x8)
- return RISCVII::LMUL_F2;
+ return RISCVVType::LMUL_F2;
if (VT.SimpleTy >= MVT::riscv_nxv8i8x2 &&
VT.SimpleTy <= MVT::riscv_nxv8i8x8)
- return RISCVII::LMUL_1;
+ return RISCVVType::LMUL_1;
if (VT.SimpleTy >= MVT::riscv_nxv16i8x2 &&
VT.SimpleTy <= MVT::riscv_nxv16i8x4)
- return RISCVII::LMUL_2;
+ return RISCVVType::LMUL_2;
if (VT.SimpleTy == MVT::riscv_nxv32i8x2)
- return RISCVII::LMUL_4;
+ return RISCVVType::LMUL_4;
llvm_unreachable("Invalid vector tuple type LMUL.");
}
@@ -2392,56 +2392,54 @@ RISCVII::VLMUL RISCVTargetLowering::getLMUL(MVT VT) {
default:
llvm_unreachable("Invalid LMUL.");
case 8:
- return RISCVII::VLMUL::LMUL_F8;
+ return RISCVVType::LMUL_F8;
case 16:
- return RISCVII::VLMUL::LMUL_F4;
+ return RISCVVType::LMUL_F4;
case 32:
- return RISCVII::VLMUL::LMUL_F2;
+ return RISCVVType::LMUL_F2;
case 64:
- return RISCVII::VLMUL::LMUL_1;
+ return RISCVVType::LMUL_1;
case 128:
- return RISCVII::VLMUL::LMUL_2;
+ return RISCVVType::LMUL_2;
case 256:
- return RISCVII::VLMUL::LMUL_4;
+ return RISCVVType::LMUL_4;
case 512:
- return RISCVII::VLMUL::LMUL_8;
+ return RISCVVType::LMUL_8;
}
}
-unsigned RISCVTargetLowering::getRegClassIDForLMUL(RISCVII::VLMUL LMul) {
+unsigned RISCVTargetLowering::getRegClassIDForLMUL(RISCVVType::VLMUL LMul) {
switch (LMul) {
default:
llvm_unreachable("Invalid LMUL.");
- case RISCVII::VLMUL::LMUL_F8:
- case RISCVII::VLMUL::LMUL_F4:
- case RISCVII::VLMUL::LMUL_F2:
- case RISCVII::VLMUL::LMUL_1:
+ case RISCVVType::LMUL_F8:
+ case RISCVVType::LMUL_F4:
+ case RISCVVType::LMUL_F2:
+ case RISCVVType::LMUL_1:
return RISCV::VRRegClassID;
- case RISCVII::VLMUL::LMUL_2:
+ case RISCVVType::LMUL_2:
return RISCV::VRM2RegClassID;
- case RISCVII::VLMUL::LMUL_4:
+ case RISCVVType::LMUL_4:
return RISCV::VRM4RegClassID;
- case RISCVII::VLMUL::LMUL_8:
+ case RISCVVType::LMUL_8:
return RISCV::VRM8RegClassID;
}
}
unsigned RISCVTargetLowering::getSubregIndexByMVT(MVT VT, unsigned Index) {
- RISCVII::VLMUL LMUL = getLMUL(VT);
- if (LMUL == RISCVII::VLMUL::LMUL_F8 ||
- LMUL == RISCVII::VLMUL::LMUL_F4 ||
- LMUL == RISCVII::VLMUL::LMUL_F2 ||
- LMUL == RISCVII::VLMUL::LMUL_1) {
+ RISCVVType::VLMUL LMUL = getLMUL(VT);
+ if (LMUL == RISCVVType::LMUL_F8 || LMUL == RISCVVType::LMUL_F4 ||
+ LMUL == RISCVVType::LMUL_F2 || LMUL == RISCVVType::LMUL_1) {
static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7,
"Unexpected subreg numbering");
return RISCV::sub_vrm1_0 + Index;
}
- if (LMUL == RISCVII::VLMUL::LMUL_2) {
+ if (LMUL == RISCVVType::LMUL_2) {
static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3,
"Unexpected subreg numbering");
return RISCV::sub_vrm2_0 + Index;
}
- if (LMUL == RISCVII::VLMUL::LMUL_4) {
+ if (LMUL == RISCVVType::LMUL_4) {
static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1,
"Unexpected subreg numbering");
return RISCV::sub_vrm4_0 + Index;
@@ -3347,9 +3345,9 @@ static SDValue
getVSlidedown(SelectionDAG &DAG, const RISCVSubtarget &Subtarget,
const SDLoc &DL, EVT VT, SDValue Passthru, SDValue Op,
SDValue Offset, SDValue Mask, SDValue VL,
- unsigned Policy = RISCVII::TAIL_UNDISTURBED_MASK_UNDISTURBED) {
+ unsigned Policy = RISCVVType::TAIL_UNDISTURBED_MASK_UNDISTURBED) {
if (Passthru.isUndef())
- Policy = RISCVII::TAIL_AGNOSTIC | RISCVII::MASK_AGNOSTIC;
+ Policy = RISCVVType::TAIL_AGNOSTIC | RISCVVType::MASK_AGNOSTIC;
SDValue PolicyOp = DAG.getTargetConstant(Policy, DL, Subtarget.getXLenVT());
SDValue Ops[] = {Passthru, Op, Offset, Mask, VL, PolicyOp};
return DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, VT, Ops);
@@ -3359,9 +3357,9 @@ static SDValue
getVSlideup(SelectionDAG &DAG, const RISCVSubtarget &Subtarget, const SDLoc &DL,
EVT VT, SDValue Passthru, SDValue Op, SDValue Offset, SDValue Mask,
SDValue VL,
- unsigned Policy = RISCVII::TAIL_UNDISTURBED_MASK_UNDISTURBED) {
+ unsigned Policy = RISCVVType::TAIL_UNDISTURBED_MASK_UNDISTURBED) {
if (Passthru.isUndef())
- Policy = RISCVII::TAIL_AGNOSTIC | RISCVII::MASK_AGNOSTIC;
+ Policy = RISCVVType::TAIL_AGNOSTIC | RISCVVType::MASK_AGNOSTIC;
SDValue PolicyOp = DAG.getTargetConstant(Policy, DL, Subtarget.getXLenVT());
SDValue Ops[] = {Passthru, Op, Offset, Mask, VL, PolicyOp};
return DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, VT, Ops);
@@ -4245,13 +4243,13 @@ static SDValue lowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
InstructionCost PerSlideCost = 1;
switch (RISCVTargetLowering::getLMUL(ContainerVT)) {
default: break;
- case RISCVII::VLMUL::LMUL_2:
+ case RISCVVType::LMUL_2:
PerSlideCost = 2;
break;
- case RISCVII::VLMUL::LMUL_4:
+ case RISCVVType::LMUL_4:
PerSlideCost = 4;
break;
- case RISCVII::VLMUL::LMUL_8:
+ case RISCVVType::LMUL_8:
PerSlideCost = 8;
break;
}
@@ -4281,7 +4279,7 @@ static SDValue lowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
VT.getVectorElementType().getSizeInBits() <= Subtarget.getFLen()) &&
"Illegal type which will result in reserved encoding");
- const unsigned Policy = RISCVII::TAIL_AGNOSTIC | RISCVII::MASK_AGNOSTIC;
+ const unsigned Policy = RISCVVType::TAIL_AGNOSTIC | RISCVVType::MASK_AGNOSTIC;
SDValue Vec;
UndefCount = 0;
@@ -4773,11 +4771,12 @@ static SDValue lowerVECTOR_SHUFFLEAsVSlideup(const SDLoc &DL, MVT VT,
auto TrueMask = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).first;
// We slide up by the index that the subvector is being inserted at, and set
// VL to the index + the number of elements being inserted.
- unsigned Policy = RISCVII::TAIL_UNDISTURBED_MASK_UNDISTURBED | RISCVII::MASK_AGNOSTIC;
+ unsigned Policy =
+ RISCVVType::TAIL_UNDISTURBED_MASK_UNDISTURBED | RISCVVType::MASK_AGNOSTIC;
// If the we're adding a suffix to the in place vector, i.e. inserting right
// up to the very end of it, then we don't actually care about the tail.
if (NumSubElts + Index >= (int)NumElts)
- Policy |= RISCVII::TAIL_AGNOSTIC;
+ Policy |= RISCVVType::TAIL_AGNOSTIC;
InPlace = convertToScalableVector(ContainerVT, InPlace, DAG, Subtarget);
ToInsert = convertToScalableVector(ContainerVT, ToInsert, DAG, Subtarget);
@@ -5570,7 +5569,7 @@ static SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
if (LoV)
Res = getVSlideup(DAG, Subtarget, DL, ContainerVT, Res, LoV,
DAG.getConstant(InvRotate, DL, XLenVT), TrueMask, VL,
- RISCVII::TAIL_AGNOSTIC);
+ RISCVVType::TAIL_AGNOSTIC);
return convertFromScalableVector(VT, Res, DAG, Subtarget);
}
@@ -9457,10 +9456,10 @@ SDValue RISCVTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
DAG.getNode(ISD::ADD, DL, XLenVT, Idx, DAG.getConstant(1, DL, XLenVT));
// Use tail agnostic policy if Idx is the last index of Vec.
- unsigned Policy = RISCVII::TAIL_UNDISTURBED_MASK_UNDISTURBED;
+ unsigned Policy = RISCVVType::TAIL_UNDISTURBED_MASK_UNDISTURBED;
if (VecVT.isFixedLengthVector() && isa<ConstantSDNode>(Idx) &&
Idx->getAsZExtVal() + 1 == VecVT.getVectorNumElements())
- Policy = RISCVII::TAIL_AGNOSTIC;
+ Policy = RISCVVType::TAIL_AGNOSTIC;
SDValue Slideup = getVSlideup(DAG, Subtarget, DL, ContainerVT, Vec, ValInVec,
Idx, Mask, InsertVL, Policy);
@@ -9740,7 +9739,7 @@ static SDValue lowerVectorIntrinsicScalars(SDValue Op, SelectionDAG &DAG,
}
}
if (!I32VL) {
- RISCVII::VLMUL Lmul = RISCVTargetLowering::getLMUL(VT);
+ RISCVVType::VLMUL Lmul = RISCVTargetLowering::getLMUL(VT);
SDValue LMUL = DAG.getConstant(Lmul, DL, XLenVT);
unsigned Sew = RISCVVType::encodeSEW(VT.getScalarSizeInBits());
SDValue SEW = DAG.getConstant(Sew, DL, XLenVT);
@@ -9791,7 +9790,7 @@ static SDValue lowerVectorIntrinsicScalars(SDValue Op, SelectionDAG &DAG,
if (MaskedOff.isUndef())
return Vec;
// TAMU
- if (Policy == RISCVII::TAIL_AGNOSTIC)
+ if (Policy == RISCVVType::TAIL_AGNOSTIC)
return DAG.getNode(RISCVISD::VMERGE_VL, DL, VT, Mask, Vec, MaskedOff,
DAG.getUNDEF(VT), AVL);
// TUMA or TUMU: Currently we always emit tumu policy regardless of tuma.
@@ -10547,7 +10546,7 @@ static SDValue lowerReductionSeq(unsigned RVVOpcode, MVT ResVT,
DAG.getNode(ISD::INSERT_SUBVECTOR, DL, M1VT, DAG.getUNDEF(M1VT),
InitialValue, DAG.getVectorIdxConstant(0, DL));
SDValue PassThru = NonZeroAVL ? DAG.getUNDEF(M1VT) : InitialValue;
- SDValue Policy = DAG.getTargetConstant(RISCVII::TAIL_AGNOSTIC, DL, XLenVT);
+ SDValue Policy = DAG.getTargetConstant(RISCVVType::TAIL_AGNOSTIC, DL, XLenVT);
SDValue Ops[] = {PassThru, Vec, InitialValue, Mask, VL, Policy};
SDValue Reduction = DAG.getNode(RVVOpcode, DL, M1VT, Ops);
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ResVT, Reduction,
@@ -10807,9 +10806,9 @@ SDValue RISCVTargetLowering::lowerINSERT_SUBVECTOR(SDValue Op,
SDValue VL = DAG.getConstant(EndIndex, DL, XLenVT);
// Use tail agnostic policy if we're inserting over Vec's tail.
- unsigned Policy = RISCVII::TAIL_UNDISTURBED_MASK_UNDISTURBED;
+ unsigned Policy = RISCVVType::TAIL_UNDISTURBED_MASK_UNDISTURBED;
if (VecVT.isFixedLengthVector() && EndIndex == VecVT.getVectorNumElements())
- Policy = RISCVII::TAIL_AGNOSTIC;
+ Policy = RISCVVType::TAIL_AGNOSTIC;
// If we're inserting into the lowest elements, use a tail undisturbed
// vmv.v.v.
@@ -10933,10 +10932,10 @@ SDValue RISCVTargetLowering::lowerINSERT_SUBVECTOR(SDValue Op,
VL = DAG.getElementCount(DL, XLenVT, SubVecVT.getVectorElementCount());
// Use tail agnostic policy if we're inserting over InterSubVT's tail.
- unsigned Policy = RISCVII::TAIL_UNDISTURBED_MASK_UNDISTURBED;
+ unsigned Policy = RISCVVType::TAIL_UNDISTURBED_MASK_UNDISTURBED;
if (Subtarget.expandVScale(EndIndex) ==
Subtarget.expandVScale(InterSubVT.getVectorElementCount()))
- Policy = RISCVII::TAIL_AGNOSTIC;
+ Policy = RISCVVType::TAIL_AGNOSTIC;
// If we're inserting into the lowest elements, use a tail undisturbed
// vmv.v.v.
@@ -11108,7 +11107,7 @@ SDValue RISCVTargetLowering::lowerEXTRACT_SUBVECTOR(SDValue Op,
// was > M1 then the index would need to be a multiple of VLMAX, and so would
// divide exactly.
assert(RISCVVType::decodeVLMUL(getLMUL(ContainerSubVecVT)).second ||
- getLMUL(ContainerSubVecVT) == RISCVII::VLMUL::LMUL_1);
+ getLMUL(ContainerSubVecVT) == RISCVVType::LMUL_1);
// If the vector type is an LMUL-group type, extract a subvector equal to the
// nearest full vector register type.
@@ -11719,7 +11718,7 @@ SDValue RISCVTargetLowering::lowerVECTOR_SPLICE(SDValue Op,
DownOffset, TrueMask, UpOffset);
return getVSlideup(DAG, Subtarget, DL, VecVT, SlideDown, V2, UpOffset,
TrueMask, DAG.getRegister(RISCV::X0, XLenVT),
- RISCVII::TAIL_AGNOSTIC);
+ RISCVVType::TAIL_AGNOSTIC);
}
SDValue
@@ -11883,7 +11882,7 @@ SDValue RISCVTargetLowering::lowerMaskedLoad(SDValue Op,
Ops.push_back(Mask);
Ops.push_back(VL);
if (IntID == Intrinsic::riscv_vle_mask)
- Ops.push_back(DAG.getTargetConstant(RISCVII::TAIL_AGNOSTIC, DL, XLenVT));
+ Ops.push_back(DAG.getTargetConstant(RISCVVType::TAIL_AGNOSTIC, DL, XLenVT));
SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
@@ -11902,7 +11901,7 @@ SDValue RISCVTargetLowering::lowerMaskedLoad(SDValue Op,
// overflow.
if (IndexEltVT == MVT::i8 && VT.getVectorNumElements() > 256) {
// FIXME: We need to do vector splitting manually for LMUL=8 cases.
- assert(getLMUL(IndexVT) != RISCVII::LMUL_8);
+ assert(getLMUL(IndexVT) != RISCVVType::LMUL_8);
IndexVT = IndexVT.changeVectorElementType(MVT::i16);
UseVRGATHEREI16 = true;
}
@@ -12698,7 +12697,7 @@ RISCVTargetLowering::lowerVPSpliceExperimental(SDValue Op,
getVSlidedown(DAG, Subtarget, DL, ContainerVT, DAG.getUNDEF(ContainerVT),
Op1, DownOffset, Mask, UpOffset);
SDValue Result = getVSlideup(DAG, Subtarget, DL, ContainerVT, SlideDown, Op2,
- UpOffset, Mask, EVL2, RISCVII::TAIL_AGNOSTIC);
+ UpOffset, Mask, EVL2, RISCVVType::TAIL_AGNOSTIC);
if (IsMaskVector) {
// Truncate Result back to a mask vector (Result has same EVL as Op2)
@@ -12915,7 +12914,8 @@ SDValue RISCVTargetLowering::lowerVPStridedLoad(SDValue Op,
}
Ops.push_back(VPNode->getVectorLength());
if (!IsUnmasked) {
- SDValue Policy = DAG.getTargetConstant(RISCVII::TAIL_AGNOSTIC, DL, XLenVT);
+ SDValue Policy =
+ DAG.getTargetConstant(RISCVVType::TAIL_AGNOSTIC, DL, XLenVT);
Ops.push_back(Policy);
}
@@ -13053,7 +13053,7 @@ SDValue RISCVTargetLowering::lowerMaskedGather(SDValue Op,
Ops.push_back(Mask);
Ops.push_back(VL);
if (!IsUnmasked)
- Ops.push_back(DAG.getTargetConstant(RISCVII::TAIL_AGNOSTIC, DL, XLenVT));
+ Ops.push_back(DAG.getTargetConstant(RISCVVType::TAIL_AGNOSTIC, DL, XLenVT));
SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
SDValue Result =
@@ -19553,8 +19553,8 @@ void RISCVTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
case Intrinsic::riscv_vsetvlimax: {
bool HasAVL = IntNo == Intrinsic::riscv_vsetvli;
unsigned VSEW = Op.getConstantOperandVal(HasAVL + 1);
- RISCVII::VLMUL VLMUL =
- static_cast<RISCVII::VLMUL>(Op.getConstantOperandVal(HasAVL + 2));
+ RISCVVType::VLMUL VLMUL =
+ static_cast<RISCVVType::VLMUL>(Op.getConstantOperandVal(HasAVL + 2));
unsigned SEW = RISCVVType::decodeVSEW(VSEW);
auto [LMul, Fractional] = RISCVVType::decodeVLMUL(VLMUL);
uint64_t MaxVL = Subtarget.getRealMaxVLen() / SEW;
@@ -20168,7 +20168,7 @@ static MachineBasicBlock *emitSelectPseudo(MachineInstr &MI,
// Helper to find Masked Pseudo instruction from MC instruction, LMUL and SEW.
static const RISCV::RISCVMaskedPseudoInfo *
-lookupMaskedIntrinsic(uint16_t MCOpcode, RISCVII::VLMUL LMul, unsigned SEW) {
+lookupMaskedIntrinsic(uint16_t MCOpcode, RISCVVType::VLMUL LMul, unsigned SEW) {
const RISCVVInversePseudosTable::PseudoInfo *Inverse =
RISCVVInversePseudosTable::getBaseInfo(MCOpcode, LMul, SEW);
assert(Inverse && "Unexpected LMUL and SEW pair for instruction");
@@ -20211,7 +20211,7 @@ static MachineBasicBlock *emitVFROUND_NOEXCEPT_MASK(MachineInstr &MI,
/*IsImp*/ true));
// Emit a VFCVT_F_X
- RISCVII::VLMUL LMul = RISCVII::getLMul(MI.getDesc().TSFlags);
+ RISCVVType::VLMUL LMul = RISCVII::getLMul(MI.getDesc().TSFlags);
unsigned Log2SEW = MI.getOperand(RISCVII::getSEWOpNum(MI.getDesc())).getImm();
// There is no E8 variant for VFCVT_F_X.
assert(Log2SEW >= 4);
@@ -23262,13 +23262,13 @@ bool RISCVTargetLowering::lowerDeinterleavedIntrinsicToVPLoad(
Load->getModule(), IntrMaskIds[Factor - 2],
{VecTupTy, Mask->getType(), EVL->getType()});
- Value *Operands[] = {
- PoisonVal,
- Load->getArgOperand(0),
- Mask,
- EVL,
- ConstantInt::get(XLenTy, RISCVII::TAIL_AGNOSTIC | RISCVII::MASK_AGNOSTIC),
- ConstantInt::get(XLenTy, Log2_64(SEW))};
+ Value *Operands[] = {PoisonVal,
+ Load->getArgOperand(0),
+ Mask,
+ EVL,
+ ConstantInt::get(XLenTy, RISCVVType::TAIL_AGNOSTIC |
+ RISCVVType::MASK_AGNOSTIC),
+ ConstantInt::get(XLenTy, Log2_64(SEW))};
CallInst *VlsegN = Builder.CreateCall(VlsegNFunc, Operands);
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
index e9dd8ff96fa37..26b888653c81d 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -823,7 +823,7 @@ class RISCVTargetLowering : public TargetLowering {
// Return the value of VLMax for the given vector type (i.e. SEW and LMUL)
SDValue computeVLMax(MVT VecVT, const SDLoc &DL, SelectionDAG &DAG) const;
- static RISCVII::VLMUL getLMUL(MVT VT);
+ static RISCVVType::VLMUL getLMUL(MVT VT);
inline static unsigned computeVLMAX(unsigned VectorBits, unsigned EltSize,
unsigned MinSize) {
// Original equation:
@@ -839,7 +839,7 @@ class RISCVTargetLowering : public TargetLowering {
static std::pair<unsigned, unsigned>
computeVLMAXBounds(MVT ContainerVT, const RISCVSubtarget &Subtarget);
- static unsigned getRegClassIDForLMUL(RISCVII::VLMUL LMul);
+ static unsigned getRegClassIDForLMUL(RISCVVType::VLMUL LMul);
static unsigned getSubregIndexByMVT(MVT VT, unsigned Index);
static unsigned getRegClassIDForVecVT(MVT VT);
static std::pair<unsigned, unsigned>
diff --git a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
index ffc7e09368824..7433603daff85 100644
--- a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
@@ -342,7 +342,7 @@ inline raw_ostream &operator<<(raw_ostream &OS, const DemandedFields &DF) {
}
#endif
-static bool isLMUL1OrSmaller(RISCVII::VLMUL LMUL) {
+static bool isLMUL1OrSmaller(RISCVVType::VLMUL LMUL) {
auto [LMul, Fractional] = RISCVVType::decodeVLMUL(LMUL);
return Fractional || LMul == 1;
}
@@ -564,7 +564,7 @@ class VSETVLIInfo {
} State = Uninitialized;
// Fields from VTYPE.
- RISCVII::VLMUL VLMul = RISCVII::LMUL_1;
+ RISCVVType::VLMUL VLMul = RISCVVType::LMUL_1;
uint8_t SEW = 0;
uint8_t TailAgnostic : 1;
uint8_t MaskAgnostic : 1;
@@ -642,7 +642,7 @@ class VSETVLIInfo {
}
unsigned getSEW() const { return SEW; }
- RISCVII::VLMUL getVLMUL() const { return VLMul; }
+ RISCVVType::VLMUL getVLMUL() const { return VLMul; }
bool getTailAgnostic() const { return TailAgnostic; }
bool getMaskAgnostic() const { return MaskAgnostic; }
@@ -707,7 +707,7 @@ class VSETVLIInfo {
TailAgnostic = RISCVVType::isTailAgnostic(VType);
MaskAgnostic = RISCVVType::isMaskAgnostic(VType);
}
- void setVTYPE(RISCVII::VLMUL L, unsigned S, bool TA, bool MA) {
+ void setVTYPE(RISCVVType::VLMUL L, unsigned S, bool TA, bool MA) {
assert(isValid() && !isUnknown() &&
"Can't set VTYPE for uninitialized or unknown");
VLMul = L;
@@ -716,7 +716,7 @@ class VSETVLIInfo {
MaskAgnostic = MA;
}
- void setVLMul(RISCVII::VLMUL VLMul) { this->VLMul = VLMul; }
+ void setVLMul(RISCVVType::VLMUL VLMul) { this->VLMul = VLMul; }
unsigned encodeVTYPE() const {
assert(isValid() && !isUnknown() && !SEWLMULRatioOnly &&
@@ -1018,7 +1018,7 @@ RISCVInsertVSETVLI::getInfoForVSETVLI(const MachineInstr &MI) const {
}
static unsigned computeVLMAX(unsigned VLEN, unsigned SEW,
- RISCVII::VLMUL VLMul) {
+ RISCVVType::VLMUL VLMul) {
auto [LMul, Fractional] = RISCVVType::decodeVLMUL(VLMul);
if (Fractional)
VLEN = VLEN / LMul;
@@ -1043,17 +1043,18 @@ RISCVInsertVSETVLI::computeInfoForInstr(const MachineInstr &MI) const {
if (RISCVII::hasVecPolicyOp(TSFlags)) {
const MachineOperand &Op = MI.getOperand(MI.getNumExplicitOperands() - 1);
uint64_t Policy = Op.getImm();
- assert(Policy <= (RISCVII::TAIL_AGNOSTIC | RISCVII::MASK_AGNOSTIC) &&
+ assert(Policy <=
+ (RISCVVType::TAIL_AGNOSTIC | RISCVVType::MASK_AGNOSTIC) &&
"Invalid Policy Value");
- TailAgnostic = Policy & RISCVII::TAIL_AGNOSTIC;
- MaskAgnostic = Policy & RISCVII::MASK_AGNOSTIC;
+ TailAgnostic = Policy & RISCVVType::TAIL_AGNOSTIC;
+ MaskAgnostic = Policy & RISCVVType::MASK_AGNOSTIC;
}
if (!RISCVII::usesMaskPolicy(TSFlags))
MaskAgnostic = true;
}
- RISCVII::VLMUL VLMul = RISCVII::getLMul(TSFlags);
+ RISCVVType::VLMUL VLMul = RISCVII::getLMul(TSFlags);
unsigned Log2SEW = MI.getOperand(getSEWOpNum(MI)).getImm();
// A Log2SEW of 0 is an operation on mask registers only.
@@ -1245,8 +1246,7 @@ void RISCVInsertVSETVLI::transferBefore(VSETVLIInfo &Info,
// be coalesced into another vsetvli since we won't demand any fields.
VSETVLIInfo NewInfo; // Need a new VSETVLIInfo to clear SEWLMULRatioOnly
NewInfo.setAVLImm(1);
- NewInfo.setVTYPE(RISCVII::VLMUL::LMUL_1, /*sew*/ 8, /*ta*/ true,
- /*ma*/ true);
+ NewInfo.setVTYPE(RISCVVType::LMUL_1, /*sew*/ 8, /*ta*/ true, /*ma*/ true);
Info = NewInfo;
return;
}
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
index 456fb66917216..8f7db34561749 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -193,7 +193,7 @@ static bool isConvertibleToVMV_V_V(const RISCVSubtarget &STI,
const MachineBasicBlock &MBB,
MachineBasicBlock::const_iterator MBBI,
MachineBasicBlock::const_iterator &DefMBBI,
- RISCVII::VLMUL LMul) {
+ RISCVVType::VLMUL LMul) {
if (PreferWholeRegisterMove)
return false;
@@ -223,7 +223,7 @@ static bool isConvertibleToVMV_V_V(const RISCVSubtarget &STI,
if (!FirstVSetVLI) {
FirstVSetVLI = true;
unsigned FirstVType = MBBI->getOperand(2).getImm();
- RISCVII::VLMUL FirstLMul = RISCVVType::getVLMUL(FirstVType);
+ RISCVVType::VLMUL FirstLMul = RISCVVType::getVLMUL(FirstVType);
FirstSEW = RISCVVType::getSEW(FirstVType);
// The first encountered vsetvli must have the same lmul as the
// register class of COPY.
@@ -326,7 +326,7 @@ void RISCVInstrInfo::copyPhysRegVector(
const DebugLoc &DL, MCRegister DstReg, MCRegister SrcReg, bool KillSrc,
const TargetRegisterClass *RegClass) const {
const TargetRegisterInfo *TRI = STI.getRegisterInfo();
- RISCVII::VLMUL LMul = RISCVRI::getLMul(RegClass->TSFlags);
+ RISCVVType::VLMUL LMul = RISCVRI::getLMul(RegClass->TSFlags);
unsigned NF = RISCVRI::getNF(RegClass->TSFlags);
uint16_t SrcEncoding = TRI->getEncodingValue(SrcReg);
@@ -345,7 +345,7 @@ void RISCVInstrInfo::copyPhysRegVector(
unsigned I = 0;
auto GetCopyInfo = [&](uint16_t SrcEncoding, uint16_t DstEncoding)
- -> std::tuple<RISCVII::VLMUL, const TargetRegisterClass &, unsigned,
+ -> std::tuple<RISCVVType::VLMUL, const TargetRegisterClass &, unsigned,
unsigned, unsigned> {
if (ReversedCopy) {
// For reversed copying, if there are enough aligned registers(8/4/2), we
@@ -357,40 +357,40 @@ void RISCVInstrInfo::copyPhysRegVector(
uint16_t Diff = DstEncoding - SrcEncoding;
if (I + 8 <= NumRegs && Diff >= 8 && SrcEncoding % 8 == 7 &&
DstEncoding % 8 == 7)
- return {RISCVII::LMUL_8, RISCV::VRM8RegClass, RISCV::VMV8R_V,
+ return {RISCVVType::LMUL_8, RISCV::VRM8RegClass, RISCV::VMV8R_V,
RISCV::PseudoVMV_V_V_M8, RISCV::PseudoVMV_V_I_M8};
if (I + 4 <= NumRegs && Diff >= 4 && SrcEncoding % 4 == 3 &&
DstEncoding % 4 == 3)
- return {RISCVII::LMUL_4, RISCV::VRM4RegClass, RISCV::VMV4R_V,
+ return {RISCVVType::LMUL_4, RISCV::VRM4RegClass, RISCV::VMV4R_V,
RISCV::PseudoVMV_V_V_M4, RISCV::PseudoVMV_V_I_M4};
if (I + 2 <= NumRegs && Diff >= 2 && SrcEncoding % 2 == 1 &&
DstEncoding % 2 == 1)
- return {RISCVII::LMUL_2, RISCV::VRM2RegClass, RISCV::VMV2R_V,
+ return {RISCVVType::LMUL_2, RISCV::VRM2RegClass, RISCV::VMV2R_V,
RISCV::PseudoVMV_V_V_M2, RISCV::PseudoVMV_V_I_M2};
// Or we should do LMUL1 copying.
- return {RISCVII::LMUL_1, RISCV::VRRegClass, RISCV::VMV1R_V,
+ return {RISCVVType::LMUL_1, RISCV::VRRegClass, RISCV::VMV1R_V,
RISCV::PseudoVMV_V_V_M1, RISCV::PseudoVMV_V_I_M1};
}
// For forward copying, if source register encoding and destination register
// encoding are aligned to 8/4/2, we can do a LMUL8/4/2 copying.
if (I + 8 <= NumRegs && SrcEncoding % 8 == 0 && DstEncoding % 8 == 0)
- return {RISCVII::LMUL_8, RISCV::VRM8RegClass, RISCV::VMV8R_V,
+ return {RISCVVType::LMUL_8, RISCV::VRM8RegClass, RISCV::VMV8R_V,
RISCV::PseudoVMV_V_V_M8, RISCV::PseudoVMV_V_I_M8};
if (I + 4 <= NumRegs && SrcEncoding % 4 == 0 && DstEncoding % 4 == 0)
- return {RISCVII::LMUL_4, RISCV::VRM4RegClass, RISCV::VMV4R_V,
+ return {RISCVVType::LMUL_4, RISCV::VRM4RegClass, RISCV::VMV4R_V,
RISCV::PseudoVMV_V_V_M4, RISCV::PseudoVMV_V_I_M4};
if (I + 2 <= NumRegs && SrcEncoding % 2 == 0 && DstEncoding % 2 == 0)
- return {RISCVII::LMUL_2, RISCV::VRM2RegClass, RISCV::VMV2R_V,
+ return {RISCVVType::LMUL_2, RISCV::VRM2RegClass, RISCV::VMV2R_V,
RISCV::PseudoVMV_V_V_M2, RISCV::PseudoVMV_V_I_M2};
// Or we should do LMUL1 copying.
- return {RISCVII::LMUL_1, RISCV::VRRegClass, RISCV::VMV1R_V,
+ return {RISCVVType::LMUL_1, RISCV::VRRegClass, RISCV::VMV1R_V,
RISCV::PseudoVMV_V_V_M1, RISCV::PseudoVMV_V_I_M1};
};
auto FindRegWithEncoding = [TRI](const TargetRegisterClass &RegClass,
uint16_t Encoding) {
MCRegister Reg = RISCV::V0 + Encoding;
- if (RISCVRI::getLMul(RegClass.TSFlags) == RISCVII::LMUL_1)
+ if (RISCVRI::getLMul(RegClass.TSFlags) == RISCVVType::LMUL_1)
return Reg;
return TRI->getMatchingSuperReg(Reg, RISCV::sub_vrm1_0, &RegClass);
};
@@ -2580,7 +2580,8 @@ bool RISCVInstrInfo::verifyInstruction(const MachineInstr &MI,
Ok = Imm >= 0 && Imm < RISCVCC::COND_INVALID;
break;
case RISCVOp::OPERAND_VEC_POLICY:
- Ok = (Imm & (RISCVII::TAIL_AGNOSTIC | RISCVII::MASK_AGNOSTIC)) == Imm;
+ Ok = (Imm &
+ (RISCVVType::TAIL_AGNOSTIC | RISCVVType::MASK_AGNOSTIC)) == Imm;
break;
case RISCVOp::OPERAND_SEW:
Ok = (isUInt<5>(Imm) && RISCVVType::isValidSEW(1 << Imm));
@@ -2648,7 +2649,7 @@ bool RISCVInstrInfo::verifyInstruction(const MachineInstr &MI,
return false;
}
uint64_t Policy = MI.getOperand(OpIdx).getImm();
- if (Policy > (RISCVII::TAIL_AGNOSTIC | RISCVII::MASK_AGNOSTIC)) {
+ if (Policy > (RISCVVType::TAIL_AGNOSTIC | RISCVVType::MASK_AGNOSTIC)) {
ErrInfo = "Invalid Policy Value";
return false;
}
@@ -3234,10 +3235,10 @@ std::string RISCVInstrInfo::createMIROperandComment(
}
case RISCVOp::OPERAND_VEC_POLICY:
unsigned Policy = Op.getImm();
- assert(Policy <= (RISCVII::TAIL_AGNOSTIC | RISCVII::MASK_AGNOSTIC) &&
+ assert(Policy <= (RISCVVType::TAIL_AGNOSTIC | RISCVVType::MASK_AGNOSTIC) &&
"Invalid Policy Value");
- OS << (Policy & RISCVII::TAIL_AGNOSTIC ? "ta" : "tu") << ", "
- << (Policy & RISCVII::MASK_AGNOSTIC ? "ma" : "mu");
+ OS << (Policy & RISCVVType::TAIL_AGNOSTIC ? "ta" : "tu") << ", "
+ << (Policy & RISCVVType::MASK_AGNOSTIC ? "ma" : "mu");
break;
}
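For reference, the policy immediate that the verifyInstruction and createMIROperandComment hunks above validate and print is a 2-bit mask whose values are unchanged by the rename: TAIL_UNDISTURBED_MASK_UNDISTURBED = 0, TAIL_AGNOSTIC = 1, MASK_AGNOSTIC = 2. A minimal sketch of the decode performed above:

#include <string>

// Bit 0 is the tail policy, bit 1 the mask policy.
static std::string policyToString(unsigned Policy) {
  std::string S = (Policy & 1 /*TAIL_AGNOSTIC*/) ? "ta" : "tu";
  S += ", ";
  S += (Policy & 2 /*MASK_AGNOSTIC*/) ? "ma" : "mu";
  return S;
}

// policyToString(3) == "ta, ma"; policyToString(0) == "tu, mu".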
diff --git a/llvm/lib/Target/RISCV/RISCVRegisterInfo.h b/llvm/lib/Target/RISCV/RISCVRegisterInfo.h
index 6c4e9c7b1bdc7..0830191dde3f4 100644
--- a/llvm/lib/Target/RISCV/RISCVRegisterInfo.h
+++ b/llvm/lib/Target/RISCV/RISCVRegisterInfo.h
@@ -43,8 +43,9 @@ static inline bool isVRegClass(uint64_t TSFlags) {
}
/// \returns the LMUL for the register class.
-static inline RISCVII::VLMUL getLMul(uint64_t TSFlags) {
- return static_cast<RISCVII::VLMUL>((TSFlags & VLMulShiftMask) >> VLMulShift);
+static inline RISCVVType::VLMUL getLMul(uint64_t TSFlags) {
+ return static_cast<RISCVVType::VLMUL>((TSFlags & VLMulShiftMask) >>
+ VLMulShift);
}
/// \returns the NF for the register class.
diff --git a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
index da77bae18962c..79e3b9ee09744 100644
--- a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
@@ -765,9 +765,11 @@ InstructionCost RISCVTTIImpl::getShuffleCost(TTI::ShuffleKind Kind,
}
static unsigned isM1OrSmaller(MVT VT) {
- RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
- return (LMUL == RISCVII::VLMUL::LMUL_F8 || LMUL == RISCVII::VLMUL::LMUL_F4 ||
- LMUL == RISCVII::VLMUL::LMUL_F2 || LMUL == RISCVII::VLMUL::LMUL_1);
+ RISCVVType::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
+ return (LMUL == RISCVVType::VLMUL::LMUL_F8 ||
+ LMUL == RISCVVType::VLMUL::LMUL_F4 ||
+ LMUL == RISCVVType::VLMUL::LMUL_F2 ||
+ LMUL == RISCVVType::VLMUL::LMUL_1);
}
InstructionCost RISCVTTIImpl::getScalarizationOverhead(
diff --git a/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp b/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
index 1ba7f0b522a2b..e5a98598370ec 100644
--- a/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
+++ b/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
@@ -65,13 +65,13 @@ class RISCVVLOptimizer : public MachineFunctionPass {
/// Represents the EMUL and EEW of a MachineOperand.
struct OperandInfo {
// Represent as 1,2,4,8, ... and fractional indicator. This is because
- // EMUL can take on values that don't map to RISCVII::VLMUL values exactly.
+ // EMUL can take on values that don't map to RISCVVType::VLMUL values exactly.
// For example, a mask operand can have an EMUL less than MF8.
std::optional<std::pair<unsigned, bool>> EMUL;
unsigned Log2EEW;
- OperandInfo(RISCVII::VLMUL EMUL, unsigned Log2EEW)
+ OperandInfo(RISCVVType::VLMUL EMUL, unsigned Log2EEW)
: EMUL(RISCVVType::decodeVLMUL(EMUL)), Log2EEW(Log2EEW) {}
OperandInfo(std::pair<unsigned, bool> EMUL, unsigned Log2EEW)
@@ -141,7 +141,7 @@ static raw_ostream &operator<<(raw_ostream &OS,
/// SEW are from the TSFlags of MI.
static std::pair<unsigned, bool>
getEMULEqualsEEWDivSEWTimesLMUL(unsigned Log2EEW, const MachineInstr &MI) {
- RISCVII::VLMUL MIVLMUL = RISCVII::getLMul(MI.getDesc().TSFlags);
+ RISCVVType::VLMUL MIVLMUL = RISCVII::getLMul(MI.getDesc().TSFlags);
auto [MILMUL, MILMULIsFractional] = RISCVVType::decodeVLMUL(MIVLMUL);
unsigned MILog2SEW =
MI.getOperand(RISCVII::getSEWOpNum(MI.getDesc())).getImm();
diff --git a/llvm/lib/Target/RISCV/RISCVVectorMaskDAGMutation.cpp b/llvm/lib/Target/RISCV/RISCVVectorMaskDAGMutation.cpp
index 0bddbacc89e3e..ee90868d252e4 100644
--- a/llvm/lib/Target/RISCV/RISCVVectorMaskDAGMutation.cpp
+++ b/llvm/lib/Target/RISCV/RISCVVectorMaskDAGMutation.cpp
@@ -123,7 +123,7 @@ class RISCVVectorMaskDAGMutation : public ScheduleDAGMutation {
// For LMUL=8 cases, there will be more possibilities to spill.
// FIXME: We should use RegPressureTracker to do fine-grained
// controls.
- RISCVII::getLMul(MI->getDesc().TSFlags) != RISCVII::LMUL_8)
+ RISCVII::getLMul(MI->getDesc().TSFlags) != RISCVVType::LMUL_8)
DAG->addEdge(&SU, SDep(NearestUseV0SU, SDep::Artificial));
}
}
diff --git a/llvm/lib/Target/RISCV/RISCVVectorPeephole.cpp b/llvm/lib/Target/RISCV/RISCVVectorPeephole.cpp
index 5ef1c9444f59a..7c05ff1f1a70e 100644
--- a/llvm/lib/Target/RISCV/RISCVVectorPeephole.cpp
+++ b/llvm/lib/Target/RISCV/RISCVVectorPeephole.cpp
@@ -371,7 +371,7 @@ bool RISCVVectorPeephole::convertAllOnesVMergeToVMv(MachineInstr &MI) const {
MI.removeOperand(2); // False operand
MI.removeOperand(3); // Mask operand
MI.addOperand(
- MachineOperand::CreateImm(RISCVII::TAIL_UNDISTURBED_MASK_UNDISTURBED));
+ MachineOperand::CreateImm(RISCVVType::TAIL_UNDISTURBED_MASK_UNDISTURBED));
// vmv.v.v doesn't have a mask operand, so we may be able to inflate the
// register class for the destination and passthru operands e.g. VRNoV0 -> VR
@@ -438,7 +438,7 @@ bool RISCVVectorPeephole::convertSameMaskVMergeToVMv(MachineInstr &MI) {
MI.removeOperand(2); // False operand
MI.removeOperand(3); // Mask operand
MI.addOperand(
- MachineOperand::CreateImm(RISCVII::TAIL_UNDISTURBED_MASK_UNDISTURBED));
+ MachineOperand::CreateImm(RISCVVType::TAIL_UNDISTURBED_MASK_UNDISTURBED));
// vmv.v.v doesn't have a mask operand, so we may be able to inflate the
// register class for the destination and passthru operands e.g. VRNoV0 -> VR
@@ -580,7 +580,7 @@ bool RISCVVectorPeephole::foldUndefPassthruVMV_V_V(MachineInstr &MI) {
Src->getOperand(RISCVII::getVecPolicyOpNum(Src->getDesc()));
if (RISCV::isVLKnownLE(MIVL, SrcVL))
- SrcPolicy.setImm(SrcPolicy.getImm() | RISCVII::TAIL_AGNOSTIC);
+ SrcPolicy.setImm(SrcPolicy.getImm() | RISCVVType::TAIL_AGNOSTIC);
}
MRI->replaceRegWith(MI.getOperand(0).getReg(), MI.getOperand(2).getReg());
@@ -646,10 +646,10 @@ bool RISCVVectorPeephole::foldVMV_V_V(MachineInstr &MI) {
}
// If MI was tail agnostic and the VL didn't increase, preserve it.
- int64_t Policy = RISCVII::TAIL_UNDISTURBED_MASK_UNDISTURBED;
- if ((MI.getOperand(5).getImm() & RISCVII::TAIL_AGNOSTIC) &&
+ int64_t Policy = RISCVVType::TAIL_UNDISTURBED_MASK_UNDISTURBED;
+ if ((MI.getOperand(5).getImm() & RISCVVType::TAIL_AGNOSTIC) &&
RISCV::isVLKnownLE(MI.getOperand(3), SrcVL))
- Policy |= RISCVII::TAIL_AGNOSTIC;
+ Policy |= RISCVVType::TAIL_AGNOSTIC;
Src->getOperand(RISCVII::getVecPolicyOpNum(Src->getDesc())).setImm(Policy);
MRI->replaceRegWith(MI.getOperand(0).getReg(), Src->getOperand(0).getReg());
diff --git a/llvm/lib/TargetParser/RISCVTargetParser.cpp b/llvm/lib/TargetParser/RISCVTargetParser.cpp
index 625645a99e12f..4111f8bfd2662 100644
--- a/llvm/lib/TargetParser/RISCVTargetParser.cpp
+++ b/llvm/lib/TargetParser/RISCVTargetParser.cpp
@@ -165,12 +165,12 @@ namespace RISCVVType {
// 6 | vta | Vector tail agnostic
// 5:3 | vsew[2:0] | Standard element width (SEW) setting
// 2:0 | vlmul[2:0] | Vector register group multiplier (LMUL) setting
-unsigned encodeVTYPE(RISCVII::VLMUL VLMUL, unsigned SEW, bool TailAgnostic,
+unsigned encodeVTYPE(VLMUL VLMul, unsigned SEW, bool TailAgnostic,
bool MaskAgnostic) {
assert(isValidSEW(SEW) && "Invalid SEW");
- unsigned VLMULBits = static_cast<unsigned>(VLMUL);
+ unsigned VLMulBits = static_cast<unsigned>(VLMul);
unsigned VSEWBits = encodeSEW(SEW);
- unsigned VTypeI = (VSEWBits << 3) | (VLMULBits & 0x7);
+ unsigned VTypeI = (VSEWBits << 3) | (VLMulBits & 0x7);
if (TailAgnostic)
VTypeI |= 0x40;
if (MaskAgnostic)
@@ -179,19 +179,19 @@ unsigned encodeVTYPE(RISCVII::VLMUL VLMUL, unsigned SEW, bool TailAgnostic,
return VTypeI;
}
-std::pair<unsigned, bool> decodeVLMUL(RISCVII::VLMUL VLMUL) {
- switch (VLMUL) {
+std::pair<unsigned, bool> decodeVLMUL(VLMUL VLMul) {
+ switch (VLMul) {
default:
llvm_unreachable("Unexpected LMUL value!");
- case RISCVII::VLMUL::LMUL_1:
- case RISCVII::VLMUL::LMUL_2:
- case RISCVII::VLMUL::LMUL_4:
- case RISCVII::VLMUL::LMUL_8:
- return std::make_pair(1 << static_cast<unsigned>(VLMUL), false);
- case RISCVII::VLMUL::LMUL_F2:
- case RISCVII::VLMUL::LMUL_F4:
- case RISCVII::VLMUL::LMUL_F8:
- return std::make_pair(1 << (8 - static_cast<unsigned>(VLMUL)), true);
+ case LMUL_1:
+ case LMUL_2:
+ case LMUL_4:
+ case LMUL_8:
+ return std::make_pair(1 << static_cast<unsigned>(VLMul), false);
+ case LMUL_F2:
+ case LMUL_F4:
+ case LMUL_F8:
+ return std::make_pair(1 << (8 - static_cast<unsigned>(VLMul)), true);
}
}
@@ -220,7 +220,7 @@ void printVType(unsigned VType, raw_ostream &OS) {
OS << ", mu";
}
-unsigned getSEWLMULRatio(unsigned SEW, RISCVII::VLMUL VLMul) {
+unsigned getSEWLMULRatio(unsigned SEW, VLMUL VLMul) {
unsigned LMul;
bool Fractional;
std::tie(LMul, Fractional) = decodeVLMUL(VLMul);
@@ -232,9 +232,8 @@ unsigned getSEWLMULRatio(unsigned SEW, RISCVII::VLMUL VLMul) {
return (SEW * 8) / LMul;
}
-std::optional<RISCVII::VLMUL>
-getSameRatioLMUL(unsigned SEW, RISCVII::VLMUL VLMUL, unsigned EEW) {
- unsigned Ratio = RISCVVType::getSEWLMULRatio(SEW, VLMUL);
+std::optional<VLMUL> getSameRatioLMUL(unsigned SEW, VLMUL VLMul, unsigned EEW) {
+ unsigned Ratio = RISCVVType::getSEWLMULRatio(SEW, VLMul);
unsigned EMULFixedPoint = (EEW * 8) / Ratio;
bool Fractional = EMULFixedPoint < 8;
unsigned EMUL = Fractional ? 8 / EMULFixedPoint : EMULFixedPoint / 8;
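As a concrete check of the encoding above (illustrative only, using the bit layout documented at the top of this file's hunk), encoding e32, m2, ta, ma gives:

// encodeSEW(32) == 2 (vsew encodes SEW = 8 * 2^vsew); LMUL_2 encodes as 0b001.
unsigned VTypeI = (2 << 3) | 0b001; // vsew/vlmul fields -> 0x11
VTypeI |= 0x40;                     // vta
VTypeI |= 0x80;                     // vma
// VTypeI == 0xD1, printed as "e32, m2, ta, ma".

Likewise, getSameRatioLMUL(16, LMUL_2, 8) computes Ratio = 8, so EMULFixedPoint = (8 * 8) / 8 = 8, which is not fractional and yields EMUL = 1, i.e. LMUL_1, matching the first expectation in the unit test below.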
diff --git a/llvm/unittests/TargetParser/RISCVTargetParserTest.cpp b/llvm/unittests/TargetParser/RISCVTargetParserTest.cpp
index 68338b569a208..63ac8f993ecdc 100644
--- a/llvm/unittests/TargetParser/RISCVTargetParserTest.cpp
+++ b/llvm/unittests/TargetParser/RISCVTargetParserTest.cpp
@@ -14,20 +14,20 @@ using namespace llvm;
namespace {
TEST(RISCVVType, CheckSameRatioLMUL) {
// Smaller LMUL.
- EXPECT_EQ(RISCVII::LMUL_1,
- RISCVVType::getSameRatioLMUL(16, RISCVII::LMUL_2, 8));
- EXPECT_EQ(RISCVII::LMUL_F2,
- RISCVVType::getSameRatioLMUL(16, RISCVII::LMUL_1, 8));
+ EXPECT_EQ(RISCVVType::LMUL_1,
+ RISCVVType::getSameRatioLMUL(16, RISCVVType::LMUL_2, 8));
+ EXPECT_EQ(RISCVVType::LMUL_F2,
+ RISCVVType::getSameRatioLMUL(16, RISCVVType::LMUL_1, 8));
// Smaller fractional LMUL.
- EXPECT_EQ(RISCVII::LMUL_F8,
- RISCVVType::getSameRatioLMUL(16, RISCVII::LMUL_F4, 8));
+ EXPECT_EQ(RISCVVType::LMUL_F8,
+ RISCVVType::getSameRatioLMUL(16, RISCVVType::LMUL_F4, 8));
// Bigger LMUL.
- EXPECT_EQ(RISCVII::LMUL_2,
- RISCVVType::getSameRatioLMUL(8, RISCVII::LMUL_1, 16));
- EXPECT_EQ(RISCVII::LMUL_1,
- RISCVVType::getSameRatioLMUL(8, RISCVII::LMUL_F2, 16));
+ EXPECT_EQ(RISCVVType::LMUL_2,
+ RISCVVType::getSameRatioLMUL(8, RISCVVType::LMUL_1, 16));
+ EXPECT_EQ(RISCVVType::LMUL_1,
+ RISCVVType::getSameRatioLMUL(8, RISCVVType::LMUL_F2, 16));
// Bigger fractional LMUL.
- EXPECT_EQ(RISCVII::LMUL_F2,
- RISCVVType::getSameRatioLMUL(8, RISCVII::LMUL_F4, 16));
+ EXPECT_EQ(RISCVVType::LMUL_F2,
+ RISCVVType::getSameRatioLMUL(8, RISCVVType::LMUL_F4, 16));
}
} // namespace
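After this patch, callers spell the vtype helpers through the RISCVVType namespace. A hedged usage sketch (assuming, per the PR title, the declarations live in llvm/TargetParser/RISCVTargetParser.h):

#include "llvm/TargetParser/RISCVTargetParser.h"

using namespace llvm;

// Encode a vtype and decode its LMUL with the renamed enum.
void example() {
  RISCVVType::VLMUL LMul = RISCVVType::LMUL_2;
  unsigned VType = RISCVVType::encodeVTYPE(LMul, /*SEW=*/32,
                                           /*TailAgnostic=*/true,
                                           /*MaskAgnostic=*/true);
  auto [Scale, Fractional] = RISCVVType::decodeVLMUL(LMul); // {2, false}
  (void)VType;
  (void)Scale;
  (void)Fractional;
}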