[llvm] [LV] Add option to still enable the legacy cost model. (PR #99536)

via llvm-commits llvm-commits at lists.llvm.org
Thu Jul 18 11:00:21 PDT 2024


llvmbot wrote:


@llvm/pr-subscribers-llvm-transforms

Author: Florian Hahn (fhahn)

Changes:

This patch adds a new, temporary option to keep using the legacy cost model after https://github.com/llvm/llvm-project/pull/92555. It defaults to false; the only intended use is to flip the default to true on the soon-to-be-cut release branch.
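In practice the option is just a gate at the plan-selection call sites. Below is a minimal, self-contained sketch of that pattern; `Plan`, `Planner`, and `selectPlan` are illustrative stand-ins, not the actual LoopVectorize.cpp interfaces:

```cpp
// Minimal sketch of the escape-hatch pattern used by the patch; Plan and
// Planner are illustrative stand-ins, not the real LLVM classes.
#include "llvm/Support/CommandLine.h"

using namespace llvm;

// Hidden, temporary option: off by default, only meant to be flipped to true
// on the release branch.
static cl::opt<bool> UseLegacyCostModel(
    "vectorize-use-legacy-cost-model", cl::init(false), cl::Hidden,
    cl::desc("Use the legacy cost model instead of the VPlan-based cost model. "
             "This option will be removed in the future."));

struct Plan {};
struct Planner {
  Plan LegacyPlan, VPlanBasedPlan;
  // Stand-in for LVP.getBestPlan(): the VPlan-based cost model picks the plan.
  Plan &getBestPlan() { return VPlanBasedPlan; }
  // Stand-in for LVP.getBestPlanFor(VF): the plan matching the VF chosen by
  // the legacy cost model.
  Plan &getBestPlanFor(unsigned /*Width*/) { return LegacyPlan; }
};

Plan &selectPlan(Planner &LVP, unsigned LegacyWidth) {
  // The flag only changes which cost model selects the plan; the chosen
  // VPlan is executed the same way on both paths.
  return UseLegacyCostModel ? LVP.getBestPlanFor(LegacyWidth)
                            : LVP.getBestPlan();
}
```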

---
Full diff: https://github.com/llvm/llvm-project/pull/99536.diff


1 File Affected:

- (modified) llvm/lib/Transforms/Vectorize/LoopVectorize.cpp (+20-10) 


``````````diff
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index 748db418fee8c..7602c1021663b 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -205,6 +205,11 @@ static cl::opt<unsigned> VectorizeMemoryCheckThreshold(
     "vectorize-memory-check-threshold", cl::init(128), cl::Hidden,
     cl::desc("The maximum allowed number of runtime memory checks"));
 
+static cl::opt<bool> UseLegacyCostModel(
+    "vectorize-use-legacy-cost-model", cl::init(false), cl::Hidden,
+    cl::desc("Use the legacy cost model instead of the VPlan-based cost model. "
+             "This option will be removed in the future."));
+
 // Option prefer-predicate-over-epilogue indicates that an epilogue is undesired,
 // that predication is preferred, and this lists all options. I.e., the
 // vectorizer will try to fold the tail-loop (epilogue) into the vector body
@@ -10318,8 +10323,9 @@ bool LoopVectorizePass::processLoop(Loop *L) {
       InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC, &LVL,
                                  &CM, BFI, PSI, Checks);
 
-      VPlan &BestPlan = LVP.getBestPlan();
-      assert(BestPlan.hasScalarVFOnly() &&
+      VPlan &BestPlan =
+          UseLegacyCostModel ? LVP.getBestPlanFor(VF.Width) : LVP.getBestPlan();
+      assert((UseLegacyCostModel || BestPlan.hasScalarVFOnly()) &&
              "VPlan cost model and legacy cost model disagreed");
       LVP.executePlan(VF.Width, IC, BestPlan, Unroller, DT, false);
 
@@ -10436,14 +10442,18 @@ bool LoopVectorizePass::processLoop(Loop *L) {
         if (!MainILV.areSafetyChecksAdded())
           DisableRuntimeUnroll = true;
       } else {
-        VPlan &BestPlan = LVP.getBestPlan();
-        assert(size(BestPlan.vectorFactors()) == 1 &&
-               "Plan should have a single VF");
-        ElementCount Width = *BestPlan.vectorFactors().begin();
-        LLVM_DEBUG(dbgs() << "VF picked by VPlan cost model: " << Width
-                          << "\n");
-        assert(VF.Width == Width &&
-               "VPlan cost model and legacy cost model disagreed");
+        ElementCount Width = VF.Width;
+        VPlan &BestPlan =
+            UseLegacyCostModel ? LVP.getBestPlanFor(Width) : LVP.getBestPlan();
+        if (!UseLegacyCostModel) {
+          assert(size(BestPlan.vectorFactors()) == 1 &&
+                 "Plan should have a single VF");
+          Width = *BestPlan.vectorFactors().begin();
+          LLVM_DEBUG(dbgs()
+                     << "VF picked by VPlan cost model: " << Width << "\n");
+          assert(VF.Width == Width &&
+                 "VPlan cost model and legacy cost model disagreed");
+        }
         InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, Width,
                                VF.MinProfitableTripCount, IC, &LVL, &CM, BFI,
                                PSI, Checks);

``````````
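For reference, the release-branch adjustment described above would only need to change the option's default. A hypothetical sketch (not part of this patch) of that one-line flip:

```cpp
// Hypothetical release-branch version of the option (not in this patch):
// only cl::init changes, so the legacy cost model stays on by default there.
static cl::opt<bool> UseLegacyCostModel(
    "vectorize-use-legacy-cost-model", cl::init(true), cl::Hidden,
    cl::desc("Use the legacy cost model instead of the VPlan-based cost model. "
             "This option will be removed in the future."));
```

Until then, the legacy behavior can also be requested explicitly by passing `-vectorize-use-legacy-cost-model` to `opt`, or via `-mllvm` when invoking clang.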



https://github.com/llvm/llvm-project/pull/99536


More information about the llvm-commits mailing list