[llvm] 1ce8201 - [MemorySSA] Restrict optimizations after a PhiTranslation.

Alina Sbirlea via llvm-commits llvm-commits at lists.llvm.org
Mon Aug 3 14:49:38 PDT 2020


Author: Alina Sbirlea
Date: 2020-08-03T14:46:41-07:00
New Revision: 1ce82015f6d06f8026357e4faa925f900136b575

URL: https://github.com/llvm/llvm-project/commit/1ce82015f6d06f8026357e4faa925f900136b575
DIFF: https://github.com/llvm/llvm-project/commit/1ce82015f6d06f8026357e4faa925f900136b575.diff

LOG: [MemorySSA] Restrict optimizations after a PhiTranslation.

Merging alias results from different paths is not necessarily correct
when one of the paths performed phi translation. Conservatively
terminate such paths.
Aimed to fix PR46156.

Differential Revision: https://reviews.llvm.org/D84905
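
Editor's note: the sketch below is not LLVM's actual ClobberWalker API; the
names (`PhiWalkState`, `PathAction`, `classify`) are hypothetical stand-ins
used only to illustrate the conservative bail-out this patch adds. The idea:
once any path in the current phi optimization walk has performed phi
translation, revisiting an already-seen <Access, Location> pair terminates
that path instead of merging the cached result from the earlier path.

    #include <set>
    #include <utility>

    // Hypothetical stand-ins for MemoryAccess * and MemoryLocation; this is
    // a simplified sketch, not the real ClobberWalker data structures.
    using AccessKey = const void *;
    using LocKey = const void *;

    enum class PathAction { Explore, SkipDuplicate, TerminateConservatively };

    struct PhiWalkState {
      // <Access, Location> pairs already seen during the current walk.
      std::set<std::pair<AccessKey, LocKey>> VisitedPhis;
      // Set once any path in this walk rewrote its location via phi
      // translation (mirrors the PerformedPhiTranslation flag in the patch).
      bool PerformedPhiTranslation = false;

      PathAction classify(AccessKey LastAccess, LocKey Loc) {
        if (VisitedPhis.insert({LastAccess, Loc}).second)
          return PathAction::Explore; // first visit: keep walking this path
        // The pair was visited before. Previously the walker always skipped
        // the duplicate and relied on the earlier path's result; after phi
        // translation that merge may be unsound, so terminate conservatively.
        return PerformedPhiTranslation ? PathAction::TerminateConservatively
                                       : PathAction::SkipDuplicate;
      }
    };

In the actual patch the equivalent check lives in ClobberWalker (see the
MemorySSA.cpp hunk below), where the terminate case returns a TerminatedPath.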

Added: 
    

Modified: 
    llvm/include/llvm/Analysis/MemorySSA.h
    llvm/lib/Analysis/MemorySSA.cpp
    llvm/test/Analysis/MemorySSA/phi-translation.ll

Removed: 
    


################################################################################
diff --git a/llvm/include/llvm/Analysis/MemorySSA.h b/llvm/include/llvm/Analysis/MemorySSA.h
index 5ce2b3fd047f..3ec09e8c0a45 100644
--- a/llvm/include/llvm/Analysis/MemorySSA.h
+++ b/llvm/include/llvm/Analysis/MemorySSA.h
@@ -1214,6 +1214,8 @@ class upward_defs_iterator
 
   BasicBlock *getPhiArgBlock() const { return DefIterator.getPhiArgBlock(); }
 
+  bool performedPhiTranslation() const { return PerformedPhiTranslation; }
+
 private:
   void fillInCurrentPair() {
     CurrentPair.first = *DefIterator;
@@ -1226,6 +1228,7 @@ class upward_defs_iterator
                                         false)) {
         if (Translator.getAddr() != Location.Ptr) {
           CurrentPair.second = Location.getWithNewPtr(Translator.getAddr());
+          PerformedPhiTranslation = true;
           return;
         }
       } else {
@@ -1240,8 +1243,9 @@ class upward_defs_iterator
   memoryaccess_def_iterator DefIterator;
   MemoryLocation Location;
   MemoryAccess *OriginalAccess = nullptr;
-  bool WalkingPhi = false;
   DominatorTree *DT = nullptr;
+  bool WalkingPhi = false;
+  bool PerformedPhiTranslation = false;
 };
 
 inline upward_defs_iterator upward_defs_begin(const MemoryAccessPair &Pair,

diff --git a/llvm/lib/Analysis/MemorySSA.cpp b/llvm/lib/Analysis/MemorySSA.cpp
index f2f5fd70f471..6fc827ae2b17 100644
--- a/llvm/lib/Analysis/MemorySSA.cpp
+++ b/llvm/lib/Analysis/MemorySSA.cpp
@@ -519,9 +519,16 @@ template <class AliasAnalysisType> class ClobberWalker {
   UpwardsMemoryQuery *Query;
   unsigned *UpwardWalkLimit;
 
-  // Phi optimization bookkeeping
+  // Phi optimization bookkeeping:
+  // List of DefPath to process during the current phi optimization walk.
   SmallVector<DefPath, 32> Paths;
+  // List of visited <Access, Location> pairs; we can skip paths already
+  // visited with the same memory location.
   DenseSet<ConstMemoryAccessPair> VisitedPhis;
+  // Record if phi translation has been performed during the current phi
+  // optimization walk, as merging alias results after phi translation can
+  // yield incorrect results. Context in PR46156.
+  bool PerformedPhiTranslation = false;
 
   /// Find the nearest def or phi that `From` can legally be optimized to.
   const MemoryAccess *getWalkTarget(const MemoryPhi *From) const {
@@ -596,12 +603,13 @@ template <class AliasAnalysisType> class ClobberWalker {
 
   void addSearches(MemoryPhi *Phi, SmallVectorImpl<ListIndex> &PausedSearches,
                    ListIndex PriorNode) {
-    auto UpwardDefs = make_range(
-        upward_defs_begin({Phi, Paths[PriorNode].Loc}, DT), upward_defs_end());
+    auto UpwardDefsBegin = upward_defs_begin({Phi, Paths[PriorNode].Loc}, DT);
+    auto UpwardDefs = make_range(UpwardDefsBegin, upward_defs_end());
     for (const MemoryAccessPair &P : UpwardDefs) {
       PausedSearches.push_back(Paths.size());
       Paths.emplace_back(P.second, P.first, PriorNode);
     }
+    PerformedPhiTranslation |= UpwardDefsBegin.performedPhiTranslation();
   }
 
   /// Represents a search that terminated after finding a clobber. This clobber
@@ -651,8 +659,16 @@ template <class AliasAnalysisType> class ClobberWalker {
       //   - We still cache things for A, so C only needs to walk up a bit.
       // If this behavior becomes problematic, we can fix without a ton of extra
       // work.
-      if (!VisitedPhis.insert({Node.Last, Node.Loc}).second)
+      if (!VisitedPhis.insert({Node.Last, Node.Loc}).second) {
+        if (PerformedPhiTranslation) {
+          // If visiting this path performed Phi translation, don't continue,
+          // since it may not be correct to merge results from two paths if one
+          // relies on the phi translation.
+          TerminatedPath Term{Node.Last, PathIndex};
+          return Term;
+        }
         continue;
+      }
 
       const MemoryAccess *SkipStopWhere = nullptr;
       if (Query->SkipSelfAccess && Node.Loc == Query->StartingLoc) {
@@ -765,7 +781,7 @@ template <class AliasAnalysisType> class ClobberWalker {
   /// terminates when a MemoryAccess that clobbers said MemoryLocation is found.
   OptznResult tryOptimizePhi(MemoryPhi *Phi, MemoryAccess *Start,
                              const MemoryLocation &Loc) {
-    assert(Paths.empty() && VisitedPhis.empty() &&
+    assert(Paths.empty() && VisitedPhis.empty() && !PerformedPhiTranslation &&
            "Reset the optimization state.");
 
     Paths.emplace_back(Loc, Start, Phi, None);
@@ -921,6 +937,7 @@ template <class AliasAnalysisType> class ClobberWalker {
   void resetPhiOptznState() {
     Paths.clear();
     VisitedPhis.clear();
+    PerformedPhiTranslation = false;
   }
 
 public:

diff --git a/llvm/test/Analysis/MemorySSA/phi-translation.ll b/llvm/test/Analysis/MemorySSA/phi-translation.ll
index c39ccd31d24b..93ebc86210d1 100644
--- a/llvm/test/Analysis/MemorySSA/phi-translation.ll
+++ b/llvm/test/Analysis/MemorySSA/phi-translation.ll
@@ -287,3 +287,85 @@ the.end:                            ; preds = %for.main.body
   ret void
 
 }
+
+
+ at c = local_unnamed_addr global [2 x i16] zeroinitializer, align 2
+
+define i32 @dont_merge_noalias_simple(i32* noalias %ptr) {
+; CHECK-LABEL: define i32 @dont_merge_noalias_simple
+; CHECK-LABEL: entry:
+; CHECK:       ; 1 = MemoryDef(liveOnEntry)
+; CHECK-NEXT:  store i16 1, i16* %s1.ptr, align 2
+
+; CHECK-LABEL: %for.body
+; CHECK:       ; MemoryUse(4) MayAlias
+; CHECK-NEXT:    %lv = load i16, i16* %arrayidx, align 2
+
+entry:
+  %s1.ptr = getelementptr inbounds [2 x i16], [2 x i16]* @c, i64 0, i64 0
+  store i16 1, i16* %s1.ptr, align 2
+  br label %for.body
+
+for.body:                                         ; preds = %for.body, %entry
+  %storemerge2 = phi i32 [ 1, %entry ], [ %dec, %for.body ]
+  %idxprom1 = zext i32 %storemerge2 to i64
+  %arrayidx = getelementptr inbounds [2 x i16], [2 x i16]* @c, i64 0, i64 %idxprom1
+  %lv = load i16, i16* %arrayidx, align 2
+  %conv = sext i16 %lv to i32
+  store i32 %conv, i32* %ptr, align 4
+  %dec = add nsw i32 %storemerge2, -1
+  %cmp = icmp sgt i32 %storemerge2, 0
+  br i1 %cmp, label %for.body, label %for.end
+
+for.end:                                          ; preds = %for.body
+  %s2.ptr = getelementptr inbounds [2 x i16], [2 x i16]* @c, i64 0, i64 0
+  store i16 0, i16* %s2.ptr, align 2
+  ret i32 0
+}
+
+
+define i32 @dont_merge_noalias_complex(i32* noalias %ptr, i32* noalias %another) {
+; CHECK-LABEL: define i32 @dont_merge_noalias_complex
+; CHECK-LABEL: entry:
+; CHECK:       ; 1 = MemoryDef(liveOnEntry)
+; CHECK-NEXT:  store i16 1, i16* %s1.ptr, align 2
+
+; CHECK-LABEL: %for.body
+; CHECK:       ; MemoryUse(7) MayAlias
+; CHECK-NEXT:    %lv = load i16, i16* %arrayidx, align 2
+
+entry:
+  %s1.ptr = getelementptr inbounds [2 x i16], [2 x i16]* @c, i64 0, i64 0
+  store i16 1, i16* %s1.ptr, align 2
+  br label %for.body
+
+for.body:                                         ; preds = %for.body, %entry
+  %storemerge2 = phi i32 [ 1, %entry ], [ %dec, %merge.body ]
+  %idxprom1 = zext i32 %storemerge2 to i64
+  %arrayidx = getelementptr inbounds [2 x i16], [2 x i16]* @c, i64 0, i64 %idxprom1
+  %lv = load i16, i16* %arrayidx, align 2
+  %conv = sext i16 %lv to i32
+  store i32 %conv, i32* %ptr, align 4
+  %dec = add nsw i32 %storemerge2, -1
+
+  %cmpif = icmp sgt i32 %storemerge2, 1
+  br i1 %cmpif, label %if.body, label %else.body
+
+if.body:
+  store i32 %conv, i32* %another, align 4
+  br label %merge.body
+
+else.body:
+  store i32 %conv, i32* %another, align 4
+  br label %merge.body
+
+merge.body:
+  %cmp = icmp sgt i32 %storemerge2, 0
+  br i1 %cmp, label %for.body, label %for.end
+
+for.end:                                          ; preds = %for.body
+  %s2.ptr = getelementptr inbounds [2 x i16], [2 x i16]* @c, i64 0, i64 0
+  store i16 0, i16* %s2.ptr, align 2
+  ret i32 0
+}
+
