[llvm] r353739 - [MemorySSA] Remove verifyClobberSanity.
Alina Sbirlea via llvm-commits
llvm-commits at lists.llvm.org
Mon Feb 11 11:51:21 PST 2019
Author: asbirlea
Date: Mon Feb 11 11:51:21 2019
New Revision: 353739
URL: http://llvm.org/viewvc/llvm-project?rev=353739&view=rev
Log:
[MemorySSA] Remove verifyClobberSanity.
Summary:
This verification may fail after certain transformations due to
BasicAA's fragility. Added a small explanation and a testcase that
triggers the assert in checkClobberSanity (before its removal).
Addresses PR40509.
Reviewers: george.burgess.iv
Subscribers: sanjoy, jlebar, llvm-commits, Prazek
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D57973
Added:
llvm/trunk/test/Analysis/MemorySSA/pr40509.ll
Modified:
llvm/trunk/include/llvm/Analysis/MemorySSA.h
llvm/trunk/lib/Analysis/MemorySSA.cpp
Modified: llvm/trunk/include/llvm/Analysis/MemorySSA.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/Analysis/MemorySSA.h?rev=353739&r1=353738&r2=353739&view=diff
==============================================================================
--- llvm/trunk/include/llvm/Analysis/MemorySSA.h (original)
+++ llvm/trunk/include/llvm/Analysis/MemorySSA.h Mon Feb 11 11:51:21 2019
@@ -775,9 +775,6 @@ public:
/// all uses, uses appear in the right places). This is used by unit tests.
void verifyMemorySSA() const;
- /// Check clobber sanity for an access.
- void checkClobberSanityAccess(const MemoryAccess *MA) const;
-
/// Used in various insertion functions to specify whether we are talking
/// about the beginning or end of a block.
enum InsertionPlace { Beginning, End };
@@ -792,7 +789,6 @@ protected:
void verifyDomination(Function &F) const;
void verifyOrdering(Function &F) const;
void verifyDominationNumbers(const Function &F) const;
- void verifyClobberSanity(const Function &F) const;
// This is used by the use optimizer and updater.
AccessList *getWritableBlockAccesses(const BasicBlock *BB) const {
Modified: llvm/trunk/lib/Analysis/MemorySSA.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Analysis/MemorySSA.cpp?rev=353739&r1=353738&r2=353739&view=diff
==============================================================================
--- llvm/trunk/lib/Analysis/MemorySSA.cpp (original)
+++ llvm/trunk/lib/Analysis/MemorySSA.cpp Mon Feb 11 11:51:21 2019
@@ -380,7 +380,7 @@ static bool isUseTriviallyOptimizableToL
/// \param Query The UpwardsMemoryQuery we used for our search.
/// \param AA The AliasAnalysis we used for our search.
/// \param AllowImpreciseClobber Always false, unless we do relaxed verify.
-static void
+LLVM_ATTRIBUTE_UNUSED static void
checkClobberSanity(const MemoryAccess *Start, MemoryAccess *ClobberAt,
const MemoryLocation &StartLoc, const MemorySSA &MSSA,
const UpwardsMemoryQuery &Query, AliasAnalysis &AA,
@@ -1778,34 +1778,16 @@ void MemorySSA::verifyMemorySSA() const
verifyOrdering(F);
verifyDominationNumbers(F);
Walker->verify(this);
- verifyClobberSanity(F);
-}
-
-/// Check sanity of the clobbering instruction for access MA.
-void MemorySSA::checkClobberSanityAccess(const MemoryAccess *MA) const {
- if (const auto *MUD = dyn_cast<MemoryUseOrDef>(MA)) {
- if (!MUD->isOptimized())
- return;
- auto *I = MUD->getMemoryInst();
- auto Loc = MemoryLocation::getOrNone(I);
- if (Loc == None)
- return;
- auto *Clobber = MUD->getOptimized();
- UpwardsMemoryQuery Q(I, MUD);
- checkClobberSanity(MUD, Clobber, *Loc, *this, Q, *AA, true);
- }
-}
-
-void MemorySSA::verifyClobberSanity(const Function &F) const {
-#if !defined(NDEBUG) && defined(EXPENSIVE_CHECKS)
- for (const BasicBlock &BB : F) {
- const AccessList *Accesses = getBlockAccesses(&BB);
- if (!Accesses)
- continue;
- for (const MemoryAccess &MA : *Accesses)
- checkClobberSanityAccess(&MA);
- }
-#endif
+ // Previously, the verification used to also verify that the clobberingAccess
+ // cached by MemorySSA is the same as the clobberingAccess found at a later
+ // query to AA. This does not hold true in general due to the current fragility
+ // of BasicAA which has arbitrary caps on the things it analyzes before giving
+ // up. As a result, transformations that are correct, will lead to BasicAA
+ // returning different Alias answers before and after that transformation.
+ // Invalidating MemorySSA is not an option, as the results in BasicAA can be so
+ // random, in the worst case we'd need to rebuild MemorySSA from scratch after
+ // every transformation, which defeats the purpose of using it. For such an
+ // example, see test4 added in D51960.
}
/// Verify that all of the blocks we believe to have valid domination numbers
Added: llvm/trunk/test/Analysis/MemorySSA/pr40509.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/MemorySSA/pr40509.ll?rev=353739&view=auto
==============================================================================
--- llvm/trunk/test/Analysis/MemorySSA/pr40509.ll (added)
+++ llvm/trunk/test/Analysis/MemorySSA/pr40509.ll Mon Feb 11 11:51:21 2019
@@ -0,0 +1,54 @@
+; REQUIRES: asserts
+; RUN: opt -mtriple=systemz-unknown -march=z13 -O3 -enable-mssa-loop-dependency -disable-output %s
+
+; During transform to LCSSA, an access becomes obfuscated to:
+; (2 = phi (phi(val), val)), which BasicAA fails to analyze.
+; It's currently hard coded in BasicAA to return MayAlias for nested phis.
+; This leads MemorySSA to finding a new (false) clobber for a previously
+; optimized access. With verifyClobber included in verifyMemorySSA, such a
+; transformation will cause MemorySSA verification to fail.
+; If the verifyClobber is re-enabled, this test will crash.
+
+target datalayout = "E-m:e-i1:8:16-i8:8:16-i64:64-f128:64-v128:64-a:8:16-n32:64"
+target triple = "s390x-ibm-linux"
+
+%0 = type <{ i64, i8, i64, i16 }>
+
+@g_54 = external dso_local global i16, align 2
+@g_101 = external dso_local global <{ i64, i8, i64, i8, i8 }>, align 2
+
+declare dso_local void @safe_lshift_func_int16_t_s_s()
+declare dso_local i8 @safe_div_func_int8_t_s_s()
+
+define dso_local void @func_47(%0* %arg) {
+bb:
+ %tmp = alloca i32, align 4
+ br label %bb1
+
+bb1: ; preds = %bb12, %bb
+ %tmp2 = getelementptr inbounds %0, %0* %arg, i32 0, i32 3
+ store i16 undef, i16* %tmp2, align 1
+ %tmp3 = call signext i8 @safe_div_func_int8_t_s_s()
+ %tmp7 = icmp ne i8 %tmp3, 0
+ br i1 %tmp7, label %bb8, label %bb10
+
+bb8: ; preds = %bb1
+ %tmp9 = icmp eq i32 0, 0
+ br i1 %tmp9, label %bb12, label %bb13
+
+bb10: ; preds = %bb10, %bb1
+ call void @safe_lshift_func_int16_t_s_s()
+ %tmp11 = getelementptr inbounds %0, %0* %arg, i32 0, i32 3
+ store i16 0, i16* %tmp11, align 1
+ store i8 0, i8* getelementptr inbounds (%0, %0* bitcast (<{ i64, i8, i64, i8, i8 }>* @g_101 to %0*), i32 0, i32 1), align 2
+ br label %bb10
+
+bb12: ; preds = %bb8
+ store i16 0, i16* @g_54, align 2
+ br label %bb1
+
+bb13: ; preds = %bb8
+ ret void
+}
+
+
More information about the llvm-commits
mailing list