[llvm] [NFC][LoopVectorize] Introduce new getEstimatedRuntimeVF function (PR #116247)
David Sherwood via llvm-commits
llvm-commits at lists.llvm.org
Mon Nov 18 05:39:47 PST 2024
https://github.com/david-arm updated https://github.com/llvm/llvm-project/pull/116247
From d992006933dc4e2edc4c453a319ea8c8456334ea Mon Sep 17 00:00:00 2001
From: David Sherwood <david.sherwood at arm.com>
Date: Mon, 18 Nov 2024 13:37:57 +0000
Subject: [PATCH] [NFC][LoopVectorize] Introduce new getEstimatedRuntimeVF
function
There are lots of places where we try to estimate the runtime
vectorisation factor based on the getVScaleForTuning TTI hook.
I've added a new getEstimatedRuntimeVF function and taught
several places in the vectoriser to use this new function.
---
.../Transforms/Vectorize/LoopVectorize.cpp | 76 +++++++++----------
1 file changed, 35 insertions(+), 41 deletions(-)
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index 1d9e4f5a19f5ce..a398cc4ebc199f 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -1519,7 +1519,7 @@ class LoopVectorizationCostModel {
/// \p Multiplier is an additional scaling factor applied to VF before
/// comparing to EpilogueVectorizationMinVF.
bool isEpilogueVectorizationProfitable(const ElementCount VF,
- const unsigned Multiplier) const;
+ const unsigned IC) const;
/// Returns the execution time cost of an instruction for a given vector
/// width. Vector width of one means scalar.
@@ -4291,6 +4291,21 @@ getVScaleForTuning(const Loop *L, const TargetTransformInfo &TTI) {
return TTI.getVScaleForTuning();
}
+/// This function attempts to return a value that represents the vectorization
+/// factor at runtime. For fixed-width VFs we know this precisely at compile
+/// time, but for scalable VFs we calculate it based on an estimate of the
+/// vscale value.
+static unsigned getEstimatedRuntimeVF(const Loop *L,
+ const TargetTransformInfo &TTI,
+ ElementCount VF) {
+ unsigned EstimatedVF = VF.getKnownMinValue();
+ if (VF.isScalable())
+ if (std::optional<unsigned> VScale = getVScaleForTuning(L, TTI))
+ EstimatedVF *= *VScale;
+ assert(EstimatedVF >= 1 && "Estimated VF shouldn't be less than 1");
+ return EstimatedVF;
+}
+
bool LoopVectorizationPlanner::isMoreProfitable(
const VectorizationFactor &A, const VectorizationFactor &B,
const unsigned MaxTripCount) const {
@@ -4593,17 +4608,13 @@ VectorizationFactor LoopVectorizationPlanner::selectVectorizationFactor() {
InstructionCost C = CM.expectedCost(VF);
VectorizationFactor Candidate(VF, C, ScalarCost.ScalarCost);
- unsigned AssumedMinimumVscale =
- getVScaleForTuning(OrigLoop, TTI).value_or(1);
- unsigned Width =
- Candidate.Width.isScalable()
- ? Candidate.Width.getKnownMinValue() * AssumedMinimumVscale
- : Candidate.Width.getFixedValue();
+ unsigned Width = getEstimatedRuntimeVF(OrigLoop, TTI, Candidate.Width);
LLVM_DEBUG(dbgs() << "LV: Vector loop of width " << VF
<< " costs: " << (Candidate.Cost / Width));
if (VF.isScalable())
LLVM_DEBUG(dbgs() << " (assuming a minimum vscale of "
- << AssumedMinimumVscale << ")");
+ << getVScaleForTuning(OrigLoop, TTI).value_or(1)
+ << ")");
LLVM_DEBUG(dbgs() << ".\n");
if (!ForceVectorization && !willGenerateVectors(*P, VF, TTI)) {
@@ -4669,7 +4680,7 @@ bool LoopVectorizationPlanner::isCandidateForEpilogueVectorization(
}
bool LoopVectorizationCostModel::isEpilogueVectorizationProfitable(
- const ElementCount VF, const unsigned Multiplier) const {
+ const ElementCount VF, const unsigned IC) const {
// FIXME: We need a much better cost-model to take different parameters such
// as register pressure, code size increase and cost of extra branches into
// account. For now we apply a very crude heuristic and only consider loops
@@ -4684,9 +4695,13 @@ bool LoopVectorizationCostModel::isEpilogueVectorizationProfitable(
if (TTI.getMaxInterleaveFactor(VF) <= 1)
return false;
- if ((Multiplier * VF.getKnownMinValue()) >= EpilogueVectorizationMinVF)
- return true;
- return false;
+ // TODO: PR #108190 introduced a discrepancy between fixed-width and scalable
+ // VFs when deciding profitability.
+ // See related "TODO: extend to support scalable VFs." in
+ // selectEpilogueVectorizationFactor.
+ unsigned Multiplier = VF.isFixed() ? IC : 1;
+ return getEstimatedRuntimeVF(TheLoop, TTI, VF * Multiplier) >=
+ EpilogueVectorizationMinVF;
}
VectorizationFactor LoopVectorizationPlanner::selectEpilogueVectorizationFactor(
@@ -4729,11 +4744,7 @@ VectorizationFactor LoopVectorizationPlanner::selectEpilogueVectorizationFactor(
return Result;
}
- unsigned Multiplier = IC;
- if (MainLoopVF.isScalable())
- Multiplier = getVScaleForTuning(OrigLoop, TTI).value_or(1);
-
- if (!CM.isEpilogueVectorizationProfitable(MainLoopVF, Multiplier)) {
+ if (!CM.isEpilogueVectorizationProfitable(MainLoopVF, IC)) {
LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is not profitable for "
"this loop\n");
return Result;
@@ -4742,12 +4753,8 @@ VectorizationFactor LoopVectorizationPlanner::selectEpilogueVectorizationFactor(
// If MainLoopVF = vscale x 2, and vscale is expected to be 4, then we know
// the main loop handles 8 lanes per iteration. We could still benefit from
// vectorizing the epilogue loop with VF=4.
- ElementCount EstimatedRuntimeVF = MainLoopVF;
- if (MainLoopVF.isScalable()) {
- EstimatedRuntimeVF = ElementCount::getFixed(MainLoopVF.getKnownMinValue());
- if (std::optional<unsigned> VScale = getVScaleForTuning(OrigLoop, TTI))
- EstimatedRuntimeVF *= *VScale;
- }
+ ElementCount EstimatedRuntimeVF =
+ ElementCount::getFixed(getEstimatedRuntimeVF(OrigLoop, TTI, MainLoopVF));
ScalarEvolution &SE = *PSE.getSE();
Type *TCType = Legal->getWidestInductionType();
@@ -4987,13 +4994,7 @@ LoopVectorizationCostModel::selectInterleaveCount(ElementCount VF,
MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor;
}
- unsigned EstimatedVF = VF.getKnownMinValue();
- if (VF.isScalable()) {
- if (std::optional<unsigned> VScale = getVScaleForTuning(TheLoop, TTI))
- EstimatedVF *= *VScale;
- }
- assert(EstimatedVF >= 1 && "Estimated VF shouldn't be less than 1");
-
+ unsigned EstimatedVF = getEstimatedRuntimeVF(TheLoop, TTI, VF);
unsigned KnownTC = PSE.getSE()->getSmallConstantTripCount(TheLoop);
if (KnownTC > 0) {
// At least one iteration must be scalar when this constraint holds. So the
@@ -9797,8 +9798,8 @@ static void checkMixedPrecision(Loop *L, OptimizationRemarkEmitter *ORE) {
}
static bool areRuntimeChecksProfitable(GeneratedRTChecks &Checks,
- VectorizationFactor &VF,
- std::optional<unsigned> VScale, Loop *L,
+ VectorizationFactor &VF, Loop *L,
+ const TargetTransformInfo &TTI,
PredicatedScalarEvolution &PSE,
ScalarEpilogueLowering SEL) {
InstructionCost CheckCost = Checks.getCost();
@@ -9850,13 +9851,7 @@ static bool areRuntimeChecksProfitable(GeneratedRTChecks &Checks,
// For now we assume the epilogue cost EpiC = 0 for simplicity. Note that
// the computations are performed on doubles, not integers and the result
// is rounded up, hence we get an upper estimate of the TC.
- unsigned IntVF = VF.Width.getKnownMinValue();
- if (VF.Width.isScalable()) {
- unsigned AssumedMinimumVscale = 1;
- if (VScale)
- AssumedMinimumVscale = *VScale;
- IntVF *= AssumedMinimumVscale;
- }
+ unsigned IntVF = getEstimatedRuntimeVF(L, TTI, VF.Width);
uint64_t RtC = *CheckCost.getValue();
uint64_t Div = ScalarC * IntVF - *VF.Cost.getValue();
uint64_t MinTC1 = Div == 0 ? 0 : divideCeil(RtC * IntVF, Div);
@@ -10105,8 +10100,7 @@ bool LoopVectorizePass::processLoop(Loop *L) {
bool ForceVectorization =
Hints.getForce() == LoopVectorizeHints::FK_Enabled;
if (!ForceVectorization &&
- !areRuntimeChecksProfitable(Checks, VF, getVScaleForTuning(L, *TTI), L,
- PSE, SEL)) {
+ !areRuntimeChecksProfitable(Checks, VF, L, *TTI, PSE, SEL)) {
ORE->emit([&]() {
return OptimizationRemarkAnalysisAliasing(
DEBUG_TYPE, "CantReorderMemOps", L->getStartLoc(),
More information about the llvm-commits
mailing list