[llvm] b2f933a - [MemorySSA] Don't bail on phi starting access

Nikita Popov via llvm-commits llvm-commits at lists.llvm.org
Sat Mar 13 01:59:29 PST 2021


Author: Nikita Popov
Date: 2021-03-13T10:53:13+01:00
New Revision: b2f933a6ce838a24f5e9728126c71d3ba44a99be

URL: https://github.com/llvm/llvm-project/commit/b2f933a6ce838a24f5e9728126c71d3ba44a99be
DIFF: https://github.com/llvm/llvm-project/commit/b2f933a6ce838a24f5e9728126c71d3ba44a99be.diff

LOG: [MemorySSA] Don't bail on phi starting access

When calling getClobberingMemoryAccess() with a MemoryLocation on a
MemoryPhi starting access, the walker currently bails out immediately
and returns the starting access. That makes sense for the API that
does not accept a location (we wouldn't know which clobber to check
for), but not for the MemoryLocation-based API. As a result, the
walker cannot look through a MemoryPhi when it is the starting
access, yet it can when there is one additional non-clobbering def
in between. This patch removes that limitation.

Differential Revision: https://reviews.llvm.org/D98557
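
For context, a minimal sketch of the kind of query this patch affects.
The wrapper clobberFor below is hypothetical (it is not part of this
commit or of the MemorySSA API); only the walker call it forwards to
is real.

    #include "llvm/Analysis/MemoryLocation.h"
    #include "llvm/Analysis/MemorySSA.h"

    using namespace llvm;

    // Hypothetical helper: ask MemorySSA which access clobbers Loc,
    // starting the upward walk at Start. Start may be a MemoryPhi;
    // before this patch the MemoryLocation-based walker returned the
    // phi itself, with the patch it keeps walking and can return an
    // earlier def (or liveOnEntry).
    static MemoryAccess *clobberFor(MemorySSA &MSSA, MemoryAccess *Start,
                                    const MemoryLocation &Loc) {
      return MSSA.getWalker()->getClobberingMemoryAccess(Start, Loc);
    }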

Added: 
    

Modified: 
    llvm/lib/Analysis/MemorySSA.cpp
    llvm/test/Transforms/MemCpyOpt/memcpy-in-loop.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Analysis/MemorySSA.cpp b/llvm/lib/Analysis/MemorySSA.cpp
index de9a28a8d004..83e634103409 100644
--- a/llvm/lib/Analysis/MemorySSA.cpp
+++ b/llvm/lib/Analysis/MemorySSA.cpp
@@ -2402,22 +2402,23 @@ MemoryAccess *
 MemorySSA::ClobberWalkerBase<AliasAnalysisType>::getClobberingMemoryAccessBase(
     MemoryAccess *StartingAccess, const MemoryLocation &Loc,
     unsigned &UpwardWalkLimit) {
-  if (isa<MemoryPhi>(StartingAccess))
-    return StartingAccess;
+  assert(!isa<MemoryUse>(StartingAccess) && "Use cannot be defining access");
 
-  auto *StartingUseOrDef = cast<MemoryUseOrDef>(StartingAccess);
-  if (MSSA->isLiveOnEntryDef(StartingUseOrDef))
-    return StartingUseOrDef;
+  Instruction *I = nullptr;
+  if (auto *StartingUseOrDef = dyn_cast<MemoryUseOrDef>(StartingAccess)) {
+    if (MSSA->isLiveOnEntryDef(StartingUseOrDef))
+      return StartingUseOrDef;
 
-  Instruction *I = StartingUseOrDef->getMemoryInst();
+    I = StartingUseOrDef->getMemoryInst();
 
-  // Conservatively, fences are always clobbers, so don't perform the walk if we
-  // hit a fence.
-  if (!isa<CallBase>(I) && I->isFenceLike())
-    return StartingUseOrDef;
+    // Conservatively, fences are always clobbers, so don't perform the walk if
+    // we hit a fence.
+    if (!isa<CallBase>(I) && I->isFenceLike())
+      return StartingUseOrDef;
+  }
 
   UpwardsMemoryQuery Q;
-  Q.OriginalAccess = StartingUseOrDef;
+  Q.OriginalAccess = StartingAccess;
   Q.StartingLoc = Loc;
   Q.Inst = nullptr;
   Q.IsCall = false;
@@ -2425,16 +2426,14 @@ MemorySSA::ClobberWalkerBase<AliasAnalysisType>::getClobberingMemoryAccessBase(
   // Unlike the other function, do not walk to the def of a def, because we are
   // handed something we already believe is the clobbering access.
   // We never set SkipSelf to true in Q in this method.
-  MemoryAccess *DefiningAccess = isa<MemoryUse>(StartingUseOrDef)
-                                     ? StartingUseOrDef->getDefiningAccess()
-                                     : StartingUseOrDef;
-
   MemoryAccess *Clobber =
-      Walker.findClobber(DefiningAccess, Q, UpwardWalkLimit);
-  LLVM_DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is ");
-  LLVM_DEBUG(dbgs() << *StartingUseOrDef << "\n");
-  LLVM_DEBUG(dbgs() << "Final Memory SSA clobber for " << *I << " is ");
-  LLVM_DEBUG(dbgs() << *Clobber << "\n");
+      Walker.findClobber(StartingAccess, Q, UpwardWalkLimit);
+  LLVM_DEBUG({
+    dbgs() << "Clobber starting at access " << *StartingAccess << "\n";
+    if (I)
+      dbgs() << "  for instruction " << *I << "\n";
+    dbgs() << "  is " << *Clobber << "\n";
+  });
   return Clobber;
 }
 

diff --git a/llvm/test/Transforms/MemCpyOpt/memcpy-in-loop.ll b/llvm/test/Transforms/MemCpyOpt/memcpy-in-loop.ll
index c9786987fa21..1d806b83c280 100644
--- a/llvm/test/Transforms/MemCpyOpt/memcpy-in-loop.ll
+++ b/llvm/test/Transforms/MemCpyOpt/memcpy-in-loop.ll
@@ -12,7 +12,6 @@ define void @test_copy_uninit([1000 x [1000 x i32]]* %arg) {
 ; CHECK:       loop:
 ; CHECK-NEXT:    [[CURRENT:%.*]] = phi [1000 x i32]* [ [[BEGIN]], [[START:%.*]] ], [ [[NEXT:%.*]], [[LOOP]] ]
 ; CHECK-NEXT:    [[CURRENT_I8:%.*]] = bitcast [1000 x i32]* [[CURRENT]] to i8*
-; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* nonnull align 4 dereferenceable(4000) [[CURRENT_I8]], i8* nonnull align 4 dereferenceable(4000) [[ALLOCA_I8]], i64 4000, i1 false)
 ; CHECK-NEXT:    [[NEXT]] = getelementptr inbounds [1000 x i32], [1000 x i32]* [[CURRENT]], i64 1
 ; CHECK-NEXT:    [[COND:%.*]] = icmp eq [1000 x i32]* [[NEXT]], [[END]]
 ; CHECK-NEXT:    br i1 [[COND]], label [[EXIT:%.*]], label [[LOOP]]
@@ -50,7 +49,7 @@ define void @test_copy_zero([1000 x [1000 x i32]]* %arg) {
 ; CHECK:       loop:
 ; CHECK-NEXT:    [[CURRENT:%.*]] = phi [1000 x i32]* [ [[BEGIN]], [[START:%.*]] ], [ [[NEXT:%.*]], [[LOOP]] ]
 ; CHECK-NEXT:    [[CURRENT_I8:%.*]] = bitcast [1000 x i32]* [[CURRENT]] to i8*
-; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* nonnull align 4 dereferenceable(4000) [[CURRENT_I8]], i8* nonnull align 4 dereferenceable(4000) [[ALLOCA_I8]], i64 4000, i1 false)
+; CHECK-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 4 [[CURRENT_I8]], i8 0, i64 4000, i1 false)
 ; CHECK-NEXT:    [[NEXT]] = getelementptr inbounds [1000 x i32], [1000 x i32]* [[CURRENT]], i64 1
 ; CHECK-NEXT:    [[COND:%.*]] = icmp eq [1000 x i32]* [[NEXT]], [[END]]
 ; CHECK-NEXT:    br i1 [[COND]], label [[EXIT:%.*]], label [[LOOP]]

