[llvm] 3e89aca - [RISCV] Rename getELEN to getELen [nfc]
Philip Reames via llvm-commits
llvm-commits at lists.llvm.org
Thu Aug 31 11:27:07 PDT 2023
Author: Philip Reames
Date: 2023-08-31T11:27:00-07:00
New Revision: 3e89aca4463446cf17f75f492abd8724cb2b9f48
URL: https://github.com/llvm/llvm-project/commit/3e89aca4463446cf17f75f492abd8724cb2b9f48
DIFF: https://github.com/llvm/llvm-project/commit/3e89aca4463446cf17f75f492abd8724cb2b9f48.diff
LOG: [RISCV] Rename getELEN to getELen [nfc]
Let's follow the naming scheme used for DLen, XLen, and FLen.
Added:
Modified:
llvm/lib/Target/RISCV/RISCVISelLowering.cpp
llvm/lib/Target/RISCV/RISCVSubtarget.h
llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
Removed:
################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 959935ff0bd56a..7c0feb5931bc52 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -157,7 +157,7 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
auto addRegClassForRVV = [this](MVT VT) {
// Disable the smallest fractional LMUL types if ELEN is less than
// RVVBitsPerBlock.
- unsigned MinElts = RISCV::RVVBitsPerBlock / Subtarget.getELEN();
+ unsigned MinElts = RISCV::RVVBitsPerBlock / Subtarget.getELen();
if (VT.getVectorMinNumElements() < MinElts)
return;
@@ -1333,7 +1333,7 @@ bool RISCVTargetLowering::shouldExpandGetVectorLength(EVT TripCountVT,
return true;
// Don't allow VF=1 if those types aren't legal.
- if (VF < RISCV::RVVBitsPerBlock / Subtarget.getELEN())
+ if (VF < RISCV::RVVBitsPerBlock / Subtarget.getELen())
return true;
// VLEN=32 support is incomplete.
@@ -2341,7 +2341,7 @@ static bool useRVVForFixedLengthVectorVT(MVT VT,
}
// Reject elements larger than ELEN.
- if (EltVT.getSizeInBits() > Subtarget.getELEN())
+ if (EltVT.getSizeInBits() > Subtarget.getELen())
return false;
unsigned LMul = divideCeil(VT.getSizeInBits(), MinVLen);
@@ -2370,7 +2370,7 @@ static MVT getContainerForFixedLengthVector(const TargetLowering &TLI, MVT VT,
"Expected legal fixed length vector!");
unsigned MinVLen = Subtarget.getRealMinVLen();
- unsigned MaxELen = Subtarget.getELEN();
+ unsigned MaxELen = Subtarget.getELen();
MVT EltVT = VT.getVectorElementType();
switch (EltVT.SimpleTy) {
@@ -3222,7 +3222,7 @@ static SDValue lowerBuildVectorOfConstants(SDValue Op, SelectionDAG &DAG,
// XLenVT if we're producing a v8i1. This results in more consistent
// codegen across RV32 and RV64.
unsigned NumViaIntegerBits = std::clamp(NumElts, 8u, Subtarget.getXLen());
- NumViaIntegerBits = std::min(NumViaIntegerBits, Subtarget.getELEN());
+ NumViaIntegerBits = std::min(NumViaIntegerBits, Subtarget.getELen());
// If we have to use more than one INSERT_VECTOR_ELT then this
// optimization is likely to increase code size; avoid performing it in
// such a case. We can use a load from a constant pool in this case.
@@ -3722,7 +3722,7 @@ static bool isDeinterleaveShuffle(MVT VT, MVT ContainerVT, SDValue V1,
SDValue V2, ArrayRef<int> Mask,
const RISCVSubtarget &Subtarget) {
// Need to be able to widen the vector.
- if (VT.getScalarSizeInBits() >= Subtarget.getELEN())
+ if (VT.getScalarSizeInBits() >= Subtarget.getELen())
return false;
// Both input must be extracts.
@@ -3766,7 +3766,7 @@ static bool isDeinterleaveShuffle(MVT VT, MVT ContainerVT, SDValue V1,
static bool isInterleaveShuffle(ArrayRef<int> Mask, MVT VT, int &EvenSrc,
int &OddSrc, const RISCVSubtarget &Subtarget) {
// We need to be able to widen elements to the next larger integer type.
- if (VT.getScalarSizeInBits() >= Subtarget.getELEN())
+ if (VT.getScalarSizeInBits() >= Subtarget.getELen())
return false;
int Size = Mask.size();
@@ -4117,7 +4117,7 @@ static SDValue getWideningInterleave(SDValue EvenV, SDValue OddV,
OddV = convertToScalableVector(VecContainerVT, OddV, DAG, Subtarget);
}
- assert(VecVT.getScalarSizeInBits() < Subtarget.getELEN());
+ assert(VecVT.getScalarSizeInBits() < Subtarget.getELen());
// We're working with a vector of the same size as the resulting
// interleaved vector, but with half the number of elements and
@@ -7385,7 +7385,7 @@ SDValue RISCVTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
unsigned WidenVecLen;
SDValue ExtractElementIdx;
SDValue ExtractBitIdx;
- unsigned MaxEEW = Subtarget.getELEN();
+ unsigned MaxEEW = Subtarget.getELen();
MVT LargestEltVT = MVT::getIntegerVT(
std::min(MaxEEW, unsigned(XLenVT.getSizeInBits())));
if (NumElts <= LargestEltVT.getSizeInBits()) {
@@ -7686,7 +7686,7 @@ static SDValue lowerGetVectorLength(SDNode *N, SelectionDAG &DAG,
// Determine the VF that corresponds to LMUL 1 for ElementWidth.
unsigned LMul1VF = RISCV::RVVBitsPerBlock / ElementWidth;
// We don't support VF==1 with ELEN==32.
- unsigned MinVF = RISCV::RVVBitsPerBlock / Subtarget.getELEN();
+ unsigned MinVF = RISCV::RVVBitsPerBlock / Subtarget.getELen();
unsigned VF = N->getConstantOperandVal(2);
assert(VF >= MinVF && VF <= (LMul1VF * 8) && isPowerOf2_32(VF) &&
@@ -8769,7 +8769,7 @@ SDValue RISCVTargetLowering::lowerVECTOR_DEINTERLEAVE(SDValue Op,
// We can deinterleave through vnsrl.wi if the element type is smaller than
// ELEN
- if (VecVT.getScalarSizeInBits() < Subtarget.getELEN()) {
+ if (VecVT.getScalarSizeInBits() < Subtarget.getELen()) {
SDValue Even =
getDeinterleaveViaVNSRL(DL, VecVT, Concat, true, Subtarget, DAG);
SDValue Odd =
@@ -8838,7 +8838,7 @@ SDValue RISCVTargetLowering::lowerVECTOR_INTERLEAVE(SDValue Op,
// If the element type is smaller than ELEN, then we can interleave with
// vwaddu.vv and vwmaccu.vx
- if (VecVT.getScalarSizeInBits() < Subtarget.getELEN()) {
+ if (VecVT.getScalarSizeInBits() < Subtarget.getELen()) {
Interleaved = getWideningInterleave(Op.getOperand(0), Op.getOperand(1), DL,
DAG, Subtarget);
} else {
@@ -17805,7 +17805,7 @@ EVT RISCVTargetLowering::getOptimalMemOpType(const MemOp &Op,
// a large scalar constant and instead use vmv.v.x/i to do the
// broadcast. For everything else, prefer ELenVT to minimize VL and thus
// maximize the chance we can encode the size in the vsetvli.
- MVT ELenVT = MVT::getIntegerVT(Subtarget.getELEN());
+ MVT ELenVT = MVT::getIntegerVT(Subtarget.getELen());
MVT PreferredVT = (Op.isMemset() && !Op.isZeroMemset()) ? MVT::i8 : ELenVT;
// Do we have sufficient alignment for our preferred VT? If not, revert
diff --git a/llvm/lib/Target/RISCV/RISCVSubtarget.h b/llvm/lib/Target/RISCV/RISCVSubtarget.h
index bfc68774f50612..e89093642429e6 100644
--- a/llvm/lib/Target/RISCV/RISCVSubtarget.h
+++ b/llvm/lib/Target/RISCV/RISCVSubtarget.h
@@ -143,7 +143,7 @@ class RISCVSubtarget : public RISCVGenSubtargetInfo {
return 0;
}
- unsigned getELEN() const {
+ unsigned getELen() const {
assert(hasVInstructions() && "Expected V extension");
return hasVInstructionsI64() ? 64 : 32;
}
diff --git a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
index 58f13487a2fcd7..7cf8c7001e511a 100644
--- a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
@@ -307,7 +307,7 @@ InstructionCost RISCVTTIImpl::getShuffleCost(TTI::ShuffleKind Kind,
// If the size of the element is < ELEN then shuffles of interleaves and
// deinterleaves of 2 vectors can be lowered into the following
// sequences
- if (EltTp.getScalarSizeInBits() < ST->getELEN()) {
+ if (EltTp.getScalarSizeInBits() < ST->getELen()) {
// Example sequence:
// vsetivli zero, 4, e8, mf4, ta, ma (ignored)
// vwaddu.vv v10, v8, v9
@@ -1186,8 +1186,8 @@ InstructionCost RISCVTTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst,
return BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I);
// Skip if element size of Dst or Src is bigger than ELEN.
- if (Src->getScalarSizeInBits() > ST->getELEN() ||
- Dst->getScalarSizeInBits() > ST->getELEN())
+ if (Src->getScalarSizeInBits() > ST->getELen() ||
+ Dst->getScalarSizeInBits() > ST->getELen())
return BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I);
int ISD = TLI->InstructionOpcodeToISD(Opcode);
@@ -1270,7 +1270,7 @@ RISCVTTIImpl::getMinMaxReductionCost(Intrinsic::ID IID, VectorType *Ty,
return BaseT::getMinMaxReductionCost(IID, Ty, FMF, CostKind);
// Skip if scalar size of Ty is bigger than ELEN.
- if (Ty->getScalarSizeInBits() > ST->getELEN())
+ if (Ty->getScalarSizeInBits() > ST->getELen())
return BaseT::getMinMaxReductionCost(IID, Ty, FMF, CostKind);
std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Ty);
@@ -1297,7 +1297,7 @@ RISCVTTIImpl::getArithmeticReductionCost(unsigned Opcode, VectorType *Ty,
return BaseT::getArithmeticReductionCost(Opcode, Ty, FMF, CostKind);
// Skip if scalar size of Ty is bigger than ELEN.
- if (Ty->getScalarSizeInBits() > ST->getELEN())
+ if (Ty->getScalarSizeInBits() > ST->getELen())
return BaseT::getArithmeticReductionCost(Opcode, Ty, FMF, CostKind);
int ISD = TLI->InstructionOpcodeToISD(Opcode);
@@ -1332,7 +1332,7 @@ InstructionCost RISCVTTIImpl::getExtendedReductionCost(
FMF, CostKind);
// Skip if scalar size of ResTy is bigger than ELEN.
- if (ResTy->getScalarSizeInBits() > ST->getELEN())
+ if (ResTy->getScalarSizeInBits() > ST->getELen())
return BaseT::getExtendedReductionCost(Opcode, IsUnsigned, ResTy, ValTy,
FMF, CostKind);
@@ -1412,7 +1412,7 @@ InstructionCost RISCVTTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
I);
// Skip if scalar size of ValTy is bigger than ELEN.
- if (ValTy->isVectorTy() && ValTy->getScalarSizeInBits() > ST->getELEN())
+ if (ValTy->isVectorTy() && ValTy->getScalarSizeInBits() > ST->getELen())
return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind,
I);
@@ -1592,7 +1592,7 @@ InstructionCost RISCVTTIImpl::getArithmeticInstrCost(
Args, CxtI);
// Skip if scalar size of Ty is bigger than ELEN.
- if (isa<VectorType>(Ty) && Ty->getScalarSizeInBits() > ST->getELEN())
+ if (isa<VectorType>(Ty) && Ty->getScalarSizeInBits() > ST->getELen())
return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info, Op2Info,
Args, CxtI);
More information about the llvm-commits
mailing list