[llvm] 161ccfe - [MemorySSA] Pass DT to the upward iterator for proper PhiTranslation.

Alina Sbirlea via llvm-commits llvm-commits at lists.llvm.org
Wed Apr 29 14:29:15 PDT 2020


Author: Alina Sbirlea
Date: 2020-04-29T14:28:31-07:00
New Revision: 161ccfe5bad52aba7518034cea4814aea0906c69

URL: https://github.com/llvm/llvm-project/commit/161ccfe5bad52aba7518034cea4814aea0906c69
DIFF: https://github.com/llvm/llvm-project/commit/161ccfe5bad52aba7518034cea4814aea0906c69.diff

LOG: [MemorySSA] Pass DT to the upward iterator for proper PhiTranslation.

Summary:
A valid DominatorTree is needed to do PhiTranslation.
Before this patch, a MemoryUse could be optimized to an access outside a loop even though the address it loads from is modified inside the loop.
This can lead to a miscompile.

Reviewers: george.burgess.iv

Subscribers: Prazek, hiraditya, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D79068
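
Below is a minimal sketch (not part of the patch) of what a caller of the upward-walk API looks like after this change; the function name walkUpwardDefs and the MSSA/Phi/Loc arguments are hypothetical stand-ins for whatever the surrounding pass already has. It shows the new DominatorTree parameter being threaded into upward_defs() via MemorySSA::getDomTree(), so that PHITranslateValue can use dominance information when translating the queried pointer across the phi edge.

    #include "llvm/Analysis/MemorySSA.h"
    using namespace llvm;

    // Hypothetical caller: iterate the incoming definitions of a MemoryPhi,
    // phi-translating the queried location into each predecessor block.
    void walkUpwardDefs(MemorySSA &MSSA, MemoryPhi *Phi, const MemoryLocation &Loc) {
      // The upward walk now takes the DominatorTree owned by MemorySSA.
      for (const MemoryAccessPair &P : upward_defs({Phi, Loc}, MSSA.getDomTree())) {
        // P.first is the defining MemoryAccess along one incoming edge;
        // P.second is the (possibly phi-translated) MemoryLocation to query
        // along that edge.
        (void)P;
      }
    }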

Added: 
    

Modified: 
    llvm/include/llvm/Analysis/MemorySSA.h
    llvm/lib/Analysis/MemorySSA.cpp
    llvm/test/Analysis/MemorySSA/phi-translation.ll

Removed: 
    


################################################################################
diff --git a/llvm/include/llvm/Analysis/MemorySSA.h b/llvm/include/llvm/Analysis/MemorySSA.h
index cc79b7ea776b..24b806b2f498 100644
--- a/llvm/include/llvm/Analysis/MemorySSA.h
+++ b/llvm/include/llvm/Analysis/MemorySSA.h
@@ -723,6 +723,8 @@ class MemorySSA {
     return cast_or_null<MemoryPhi>(ValueToMemoryAccess.lookup(cast<Value>(BB)));
   }
 
+  DominatorTree &getDomTree() const { return *DT; }
+
   void dump() const;
   void print(raw_ostream &) const;
 
@@ -1179,9 +1181,9 @@ class upward_defs_iterator
   using BaseT = upward_defs_iterator::iterator_facade_base;
 
 public:
-  upward_defs_iterator(const MemoryAccessPair &Info)
+  upward_defs_iterator(const MemoryAccessPair &Info, DominatorTree *DT)
       : DefIterator(Info.first), Location(Info.second),
-        OriginalAccess(Info.first) {
+        OriginalAccess(Info.first), DT(DT) {
     CurrentPair.first = nullptr;
 
     WalkingPhi = Info.first && isa<MemoryPhi>(Info.first);
@@ -1220,12 +1222,13 @@ class upward_defs_iterator
           const_cast<Value *>(Location.Ptr),
           OriginalAccess->getBlock()->getModule()->getDataLayout(), nullptr);
       if (!Translator.PHITranslateValue(OriginalAccess->getBlock(),
-                                        DefIterator.getPhiArgBlock(), nullptr,
-                                        false))
+                                        DefIterator.getPhiArgBlock(), DT,
+                                        false)) {
         if (Translator.getAddr() != Location.Ptr) {
           CurrentPair.second = Location.getWithNewPtr(Translator.getAddr());
           return;
         }
+      }
     }
     CurrentPair.second = Location;
   }
@@ -1235,17 +1238,19 @@ class upward_defs_iterator
   MemoryLocation Location;
   MemoryAccess *OriginalAccess = nullptr;
   bool WalkingPhi = false;
+  DominatorTree *DT = nullptr;
 };
 
-inline upward_defs_iterator upward_defs_begin(const MemoryAccessPair &Pair) {
-  return upward_defs_iterator(Pair);
+inline upward_defs_iterator upward_defs_begin(const MemoryAccessPair &Pair,
+                                              DominatorTree &DT) {
+  return upward_defs_iterator(Pair, &DT);
 }
 
 inline upward_defs_iterator upward_defs_end() { return upward_defs_iterator(); }
 
 inline iterator_range<upward_defs_iterator>
-upward_defs(const MemoryAccessPair &Pair) {
-  return make_range(upward_defs_begin(Pair), upward_defs_end());
+upward_defs(const MemoryAccessPair &Pair, DominatorTree &DT) {
+  return make_range(upward_defs_begin(Pair, DT), upward_defs_end());
 }
 
 /// Walks the defining accesses of MemoryDefs. Stops after we hit something that

diff --git a/llvm/lib/Analysis/MemorySSA.cpp b/llvm/lib/Analysis/MemorySSA.cpp
index aeb3fecf53f2..f2f5fd70f471 100644
--- a/llvm/lib/Analysis/MemorySSA.cpp
+++ b/llvm/lib/Analysis/MemorySSA.cpp
@@ -466,7 +466,8 @@ checkClobberSanity(const MemoryAccess *Start, MemoryAccess *ClobberAt,
 
       assert(isa<MemoryPhi>(MA));
       Worklist.append(
-          upward_defs_begin({const_cast<MemoryAccess *>(MA), MAP.second}),
+          upward_defs_begin({const_cast<MemoryAccess *>(MA), MAP.second},
+                            MSSA.getDomTree()),
           upward_defs_end());
     }
   }
@@ -595,8 +596,8 @@ template <class AliasAnalysisType> class ClobberWalker {
 
   void addSearches(MemoryPhi *Phi, SmallVectorImpl<ListIndex> &PausedSearches,
                    ListIndex PriorNode) {
-    auto UpwardDefs = make_range(upward_defs_begin({Phi, Paths[PriorNode].Loc}),
-                                 upward_defs_end());
+    auto UpwardDefs = make_range(
+        upward_defs_begin({Phi, Paths[PriorNode].Loc}, DT), upward_defs_end());
     for (const MemoryAccessPair &P : UpwardDefs) {
       PausedSearches.push_back(Paths.size());
       Paths.emplace_back(P.second, P.first, PriorNode);

diff --git a/llvm/test/Analysis/MemorySSA/phi-translation.ll b/llvm/test/Analysis/MemorySSA/phi-translation.ll
index 4aa24f2c9aa2..4d8111654d56 100644
--- a/llvm/test/Analysis/MemorySSA/phi-translation.ll
+++ b/llvm/test/Analysis/MemorySSA/phi-translation.ll
@@ -193,3 +193,45 @@ if.end:
   br label %while.cond
 }
 
+; CHECK-LABEL: define i32 @use_not_optimized_due_to_backedge
+define i32 @use_not_optimized_due_to_backedge(i32* nocapture %m_i_strides, i32* nocapture readonly %eval_left_dims) {
+entry:
+; CHECK: 1 = MemoryDef(liveOnEntry)
+; CHECK-NEXT: store i32 1, i32* %m_i_strides, align 4
+  store i32 1, i32* %m_i_strides, align 4
+  br label %for.body
+
+for.cond.cleanup:                                 ; preds = %for.inc
+  ret i32 %m_i_size.1
+
+for.body:                                         ; preds = %entry, %for.inc
+; CHECK: 4 = MemoryPhi({entry,1},{for.inc,3})
+; CHECK-NEXT: %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.inc ]
+  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.inc ]
+  %m_i_size.022 = phi i32 [ 1, %entry ], [ %m_i_size.1, %for.inc ]
+  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+  %cmp1 = icmp eq i64 %indvars.iv, 0
+  %arrayidx2 = getelementptr inbounds i32, i32* %m_i_strides, i64 %indvars.iv
+; CHECK: MemoryUse(4) MayAlias
+; CHECK-NEXT: %0 = load i32, i32* %arrayidx2, align 4
+  %0 = load i32, i32* %arrayidx2, align 4
+  %arrayidx4 = getelementptr inbounds i32, i32* %eval_left_dims, i64 %indvars.iv
+; CHECK: MemoryUse(4) MayAlias
+; CHECK-NEXT: %1 = load i32, i32* %arrayidx4, align 4
+  %1 = load i32, i32* %arrayidx4, align 4
+  %mul = mul nsw i32 %1, %0
+  br i1 %cmp1, label %if.then, label %for.inc
+
+if.then:                                          ; preds = %for.body
+  %arrayidx7 = getelementptr inbounds i32, i32* %m_i_strides, i64 %indvars.iv.next
+; CHECK: 2 = MemoryDef(4)
+; CHECK-NEXT: store i32 %mul, i32* %arrayidx7, align 4
+  store i32 %mul, i32* %arrayidx7, align 4
+  br label %for.inc
+
+for.inc:                                          ; preds = %for.body, %if.then
+; CHECK: 3 = MemoryPhi({for.body,4},{if.then,2})
+; CHECK-NEXT: %m_i_size.1 = phi i32 [ %m_i_size.022, %if.then ], [ %mul, %for.body ]
+  %m_i_size.1 = phi i32 [ %m_i_size.022, %if.then ], [ %mul, %for.body ]
+  br i1 %cmp1, label %for.body, label %for.cond.cleanup
+}

