[llvm] [VPlan] Generalize noalias-licm-check to replicate regions (NFC) (PR #187017)

via llvm-commits llvm-commits at lists.llvm.org
Tue Mar 17 06:12:09 PDT 2026


llvmbot wrote:


<!--LLVM PR SUMMARY COMMENT-->
@llvm/pr-subscribers-llvm-transforms

@llvm/pr-subscribers-vectorizers

Author: Ramkumar Ramachandra (artagnon)

<details>
<summary>Changes</summary>

In order to use the canHoistOrSinkWithNoAliasCheck check in use-sites after replicate regions are created, generalize it to work with replicate regions, using a variant of VPBlockUtils::blocksOnly.

---
Full diff: https://github.com/llvm/llvm-project/pull/187017.diff


2 Files Affected:

- (modified) llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp (+22-24) 
- (modified) llvm/lib/Transforms/Vectorize/VPlanUtils.h (+15) 


``````````diff
diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
index d389db07885c8..2f1cd38c92a64 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
@@ -188,24 +188,24 @@ class SinkStoreInfo {
 };
 
 /// Check if a memory operation doesn't alias with memory operations in blocks
-/// between \p FirstBB and \p LastBB using scoped noalias metadata. If
-/// \p SinkInfo is std::nullopt, only recipes that may write to memory are
-/// checked (for load hoisting). Otherwise recipes that both read and write
-/// memory are checked, and SCEV is used to prove no-alias between the group
-/// leader and other replicate recipes (for store sinking).
+/// between \p FirstBB and \p LastBB, which is expected to be a valid range in
+/// a shallow-traversal of the vector loop region in \p Plan. We check aliasing
+/// using scoped noalias metadata. If \p SinkInfo is std::nullopt, only
+/// recipes that may write to memory are checked (for load hoisting). Otherwise
+/// recipes that both read and write memory are checked, and SCEV is used to
+/// prove no-alias between the group leader and other replicate recipes (for
+/// store sinking).
 static bool
-canHoistOrSinkWithNoAliasCheck(const MemoryLocation &MemLoc,
+canHoistOrSinkWithNoAliasCheck(const MemoryLocation &MemLoc, VPlan &Plan,
                                VPBasicBlock *FirstBB, VPBasicBlock *LastBB,
                                std::optional<SinkStoreInfo> SinkInfo = {}) {
   bool CheckReads = SinkInfo.has_value();
   if (!MemLoc.AATags.Scope)
     return false;
 
-  for (VPBlockBase *Block = FirstBB; Block;
-       Block = Block->getSingleSuccessor()) {
-    assert(Block->getNumSuccessors() <= 1 &&
-           "Expected at most one successor in block chain");
-    auto *VPBB = cast<VPBasicBlock>(Block);
+  for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(
+           vp_depth_first_shallow(Plan.getVectorLoopRegion()->getEntry()),
+           FirstBB, LastBB)) {
     for (VPRecipeBase &R : *VPBB) {
       if (SinkInfo && SinkInfo->shouldSkip(R))
         continue;
@@ -223,14 +223,12 @@ canHoistOrSinkWithNoAliasCheck(const MemoryLocation &MemLoc,
       if (ScopedNoAliasAAResult::alias(*Loc, MemLoc) != AliasResult::NoAlias)
         return false;
     }
-
-    if (Block == LastBB)
-      break;
   }
   return true;
 }
 
-/// Collect either replicated Loads or Stores grouped by their address SCEV.
+/// Collect either replicated Loads or Stores grouped by their address SCEV, in
+/// a shallow-traversal of the vector loop region in \p Plan.
 template <unsigned Opcode>
 static SmallVector<SmallVector<VPReplicateRecipe *, 4>>
 collectGroupedReplicateMemOps(
@@ -241,9 +239,8 @@ collectGroupedReplicateMemOps(
   constexpr bool IsLoad = (Opcode == Instruction::Load);
   SmallDenseMap<const SCEV *, SmallVector<VPReplicateRecipe *, 4>>
       RecipesByAddress;
-  for (VPBlockBase *Block :
-       vp_depth_first_shallow(Plan.getVectorLoopRegion()->getEntry())) {
-    auto *VPBB = cast<VPBasicBlock>(Block);
+  for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(
+           vp_depth_first_shallow(Plan.getVectorLoopRegion()->getEntry()))) {
     for (VPRecipeBase &R : *VPBB) {
       auto *RepR = dyn_cast<VPReplicateRecipe>(&R);
       if (!RepR || RepR->getOpcode() != Opcode || !FilterFn(RepR))
@@ -4850,7 +4847,8 @@ void VPlanTransforms::hoistPredicatedLoads(VPlan &Plan,
 
     // Check that the load doesn't alias with stores between first and last.
     auto LoadLoc = vputils::getMemoryLocation(*EarliestLoad);
-    if (!LoadLoc || !canHoistOrSinkWithNoAliasCheck(*LoadLoc, FirstBB, LastBB))
+    if (!LoadLoc ||
+        !canHoistOrSinkWithNoAliasCheck(*LoadLoc, Plan, FirstBB, LastBB))
       continue;
 
     // Collect common metadata from all loads in the group.
@@ -4885,7 +4883,7 @@ void VPlanTransforms::hoistPredicatedLoads(VPlan &Plan,
 static bool
 canSinkStoreWithNoAliasCheck(ArrayRef<VPReplicateRecipe *> StoresToSink,
                              PredicatedScalarEvolution &PSE, const Loop &L,
-                             VPTypeAnalysis &TypeInfo) {
+                             VPlan &Plan) {
   auto StoreLoc = vputils::getMemoryLocation(*StoresToSink.front());
   if (!StoreLoc || !StoreLoc->AATags.Scope)
     return false;
@@ -4897,8 +4895,10 @@ canSinkStoreWithNoAliasCheck(ArrayRef<VPReplicateRecipe *> StoresToSink,
 
   VPBasicBlock *FirstBB = StoresToSink.front()->getParent();
   VPBasicBlock *LastBB = StoresToSink.back()->getParent();
+  VPTypeAnalysis TypeInfo(Plan);
   SinkStoreInfo SinkInfo(StoresToSinkSet, *StoresToSink[0], PSE, L, TypeInfo);
-  return canHoistOrSinkWithNoAliasCheck(*StoreLoc, FirstBB, LastBB, SinkInfo);
+  return canHoistOrSinkWithNoAliasCheck(*StoreLoc, Plan, FirstBB, LastBB,
+                                        SinkInfo);
 }
 
 void VPlanTransforms::sinkPredicatedStores(VPlan &Plan,
@@ -4909,10 +4909,8 @@ void VPlanTransforms::sinkPredicatedStores(VPlan &Plan,
   if (Groups.empty())
     return;
 
-  VPTypeAnalysis TypeInfo(Plan);
-
   for (auto &Group : Groups) {
-    if (!canSinkStoreWithNoAliasCheck(Group, PSE, *L, TypeInfo))
+    if (!canSinkStoreWithNoAliasCheck(Group, PSE, *L, Plan))
       continue;
 
     // Use the last (most dominated) store's location for the unconditional
diff --git a/llvm/lib/Transforms/Vectorize/VPlanUtils.h b/llvm/lib/Transforms/Vectorize/VPlanUtils.h
index c4cacebcd78ba..0408476b9b01d 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanUtils.h
+++ b/llvm/lib/Transforms/Vectorize/VPlanUtils.h
@@ -279,6 +279,21 @@ class VPBlockUtils {
     });
   }
 
+  /// A variant of blocksOnly that only returns the blocks between \p FirstBB
+  /// and \p LastBB, inclusive of both endpoints.
+  template <typename BlockTy, typename T>
+  static SmallVector<VPBasicBlock *>
+  blocksOnly(const T &Range, VPBasicBlock *FirstBB, VPBasicBlock *LastBB) {
+    auto Blocks = to_vector(blocksOnly<BlockTy, T>(Range));
+    auto *FirstIt = find(Blocks, FirstBB);
+    auto *LastIt = find(Blocks, LastBB);
+    assert(FirstIt != Blocks.end() && LastIt != Blocks.end() &&
+           "FirstBB and LastBB don't correspond to Range");
+    Blocks.erase(std::next(LastIt), Blocks.end());
+    Blocks.erase(Blocks.begin(), FirstIt);
+    return Blocks;
+  }
+
   /// Inserts \p BlockPtr on the edge between \p From and \p To. That is, update
   /// \p From's successor to \p To to point to \p BlockPtr and \p To's
   /// predecessor from \p From to \p BlockPtr. \p From and \p To are added to \p

``````````

</details>


https://github.com/llvm/llvm-project/pull/187017


More information about the llvm-commits mailing list