[llvm] [LICM] Hoisting writeonly calls (PR #143799)

Tim Gymnich via llvm-commits llvm-commits at lists.llvm.org
Mon Jun 16 01:47:26 PDT 2025


================
@@ -2354,6 +2315,75 @@ collectPromotionCandidates(MemorySSA *MSSA, AliasAnalysis *AA, Loop *L) {
   return Result;
 }
 
+// For a given store or writeonly call instruction, this function checks that
+// there are no reads or writes that conflict with the memory access performed
+// by the instruction.
+static bool noConflictingReadWrites(Instruction *I, MemorySSA *MSSA,
+                                    AAResults *AA, Loop *CurLoop,
+                                    SinkAndHoistLICMFlags &Flags) {
+  assert((isa<CallInst>(*I) || isa<StoreInst>(*I)) &&
+         "Expected a store or a writeonly call");
+  // If there are more accesses than the Promotion cap, then give up as we're
+  // not walking a list that long.
+  if (Flags.tooManyMemoryAccesses())
+    return false;
+
+  auto *IMD = MSSA->getMemoryAccess(I);
+  BatchAAResults BAA(*AA);
+  auto *Source = getClobberingMemoryAccess(*MSSA, BAA, Flags, IMD);
+  // Make sure there are no clobbers inside the loop.
+  if (!MSSA->isLiveOnEntryDef(Source) && CurLoop->contains(Source->getBlock()))
+    return false;
+
+  // If there are interfering Uses (i.e. their defining access is in the
+  // loop), or ordered loads (stored as Defs!), don't move this access.
+  // Could do better here, but this is conservatively correct.
+  // TODO: Cache set of Uses on the first walk in runOnLoop, update when
+  // moving accesses. Can also extend to dominating uses.
+  for (auto *BB : CurLoop->getBlocks())
+    if (auto *Accesses = MSSA->getBlockAccesses(BB)) {
+      for (const auto &MA : *Accesses)
+        if (const auto *MU = dyn_cast<MemoryUse>(&MA)) {
+          auto *MD = getClobberingMemoryAccess(*MSSA, BAA, Flags,
+                                               const_cast<MemoryUse *>(MU));
+          if (!MSSA->isLiveOnEntryDef(MD) && CurLoop->contains(MD->getBlock()))
+            return false;
+          // Disable hoisting past potentially interfering loads. Optimized
+          // Uses may point to an access outside the loop, as getClobbering
+          // checks the previous iteration when walking the backedge.
+          // FIXME: More precise: no Uses that alias I.
+          if (!Flags.getIsSink() && !MSSA->dominates(IMD, MU))
+            return false;
+        } else if (const auto *MD = dyn_cast<MemoryDef>(&MA)) {
+          if (auto *LI = dyn_cast<LoadInst>(MD->getMemoryInst())) {
+            (void)LI; // Silence warning.
+            assert(!LI->isUnordered() && "Expected ordered load");
+            return false;
+          }
+          // Any call, even one that does not clobber I, may still be a use.
+          if (auto *CI = dyn_cast<CallInst>(MD->getMemoryInst())) {
+            // Check if the call may read from the memory location written
+            // to by I. Check CI's attributes and arguments; the number of
+            // such checks performed is limited above by NoOfMemAccTooLarge.
+            if (auto *SI = dyn_cast<StoreInst>(I)) {
+              ModRefInfo MRI = BAA.getModRefInfo(CI, MemoryLocation::get(SI));
+              if (isModOrRefSet(MRI))
+                return false;
+            } else if (auto *SCI = dyn_cast<CallInst>(I)) {
+              // If the instruction we want to hoist is itself a call, there
+              // is no need to check mod/ref info against itself.
+              if (SCI == CI)
+                continue;
+              ModRefInfo MRI = BAA.getModRefInfo(CI, SCI);
+              if (isModOrRefSet(MRI))
+                return false;
+            }
+          }
+        }
----------------
tgymnich wrote:

nit: please try to reduce the maximum indentation level. Something like this would be nice:
```c++
auto *Accesses = MSSA->getBlockAccesses(BB);
if (!Accesses)
  continue;
```
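
The same early-continue shape can carry through the rest of the loop body as well. Untested, and only reusing the names already in this hunk (I, IMD, BAA, Flags, CurLoop), so take it purely as an illustration of the nesting rather than a drop-in patch:
```c++
for (auto *BB : CurLoop->getBlocks()) {
  auto *Accesses = MSSA->getBlockAccesses(BB);
  if (!Accesses)
    continue;

  for (const auto &MA : *Accesses) {
    if (const auto *MU = dyn_cast<MemoryUse>(&MA)) {
      auto *MD = getClobberingMemoryAccess(*MSSA, BAA, Flags,
                                           const_cast<MemoryUse *>(MU));
      if (!MSSA->isLiveOnEntryDef(MD) && CurLoop->contains(MD->getBlock()))
        return false;
      // Disable hoisting past potentially interfering loads.
      if (!Flags.getIsSink() && !MSSA->dominates(IMD, MU))
        return false;
      continue;
    }

    const auto *MD = dyn_cast<MemoryDef>(&MA);
    if (!MD)
      continue; // MemoryPhis need no handling here.
    // Ordered loads show up as Defs; conservatively bail on them.
    if (isa<LoadInst>(MD->getMemoryInst()))
      return false;

    auto *CI = dyn_cast<CallInst>(MD->getMemoryInst());
    if (!CI || CI == I)
      continue;
    // Does the call read from or write to the location that I writes?
    ModRefInfo MRI =
        isa<StoreInst>(I)
            ? BAA.getModRefInfo(CI, MemoryLocation::get(cast<StoreInst>(I)))
            : BAA.getModRefInfo(CI, cast<CallInst>(I));
    if (isModOrRefSet(MRI))
      return false;
  }
}
```
The checks themselves are unchanged; only the control flow is flattened by the early continues.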

https://github.com/llvm/llvm-project/pull/143799

