[llvm-commits] [llvm] r165032 - /llvm/trunk/lib/Transforms/Scalar/SROA.cpp

Chandler Carruth chandlerc at gmail.com
Tue Oct 2 11:57:13 PDT 2012


Author: chandlerc
Date: Tue Oct  2 13:57:13 2012
New Revision: 165032

URL: http://llvm.org/viewvc/llvm-project?rev=165032&view=rev
Log:
Fix another crasher in SROA, reported by Joel.

We require that the indices into the use lists remain stable in order
to build fast lookup tables that locate a particular partition use from
an operand of a PHI or select. This is (obviously in hindsight)
incompatible with erasing elements from the array. Really, we don't
want to erase anyway: it is expensive, and a rare operation. Instead,
simply weaken the contract of the PartitionUse structure to allow null
Use pointers to represent dead uses. Now we can clear out the pointer
to mark a use as dead, and all it requires is adding some 'continue'
checks to the various loops.

I'm still reducing a test case for this, as the one I have is huge.
I think I can get a nice test case for this one though, as the crash
was much more deterministic.
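
For context, this is the usual tombstoning pattern: when side tables
hold indices into a container, mark entries dead in place rather than
erasing them, so the indices stay valid. Below is a minimal,
self-contained sketch of that idea; the names (UseEntry, UseList,
OperandToIndex, markDead) are made up for illustration and are not the
actual SROA data structures.

  #include <cstdint>
  #include <map>
  #include <vector>

  struct UseEntry {
    uint64_t BeginOffset, EndOffset;
    void *U; // Null means the use is dead and must be skipped.
    UseEntry() : BeginOffset(0), EndOffset(0), U(0) {}
  };

  struct UseList {
    std::vector<UseEntry> Uses;
    // Side table mapping an operand back to its index in Uses. Erasing
    // an element of Uses would shift every later element and silently
    // invalidate these indices; clearing the pointer keeps them stable.
    std::map<void *, unsigned> OperandToIndex;

    void markDead(void *Operand) {
      std::map<void *, unsigned>::iterator It = OperandToIndex.find(Operand);
      if (It != OperandToIndex.end())
        Uses[It->second].U = 0; // Tombstone the entry instead of erasing it.
    }

    uint64_t totalLiveBytes() const {
      uint64_t Bytes = 0;
      for (std::vector<UseEntry>::const_iterator I = Uses.begin(),
                                                 E = Uses.end();
           I != E; ++I) {
        if (!I->U)
          continue; // Skip dead uses, mirroring the 'continue' checks below.
        Bytes += I->EndOffset - I->BeginOffset;
      }
      return Bytes;
    }
  };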

Modified:
    llvm/trunk/lib/Transforms/Scalar/SROA.cpp

Modified: llvm/trunk/lib/Transforms/Scalar/SROA.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Scalar/SROA.cpp?rev=165032&r1=165031&r2=165032&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/Scalar/SROA.cpp (original)
+++ llvm/trunk/lib/Transforms/Scalar/SROA.cpp Tue Oct  2 13:57:13 2012
@@ -152,7 +152,10 @@
   /// intentionally overlap between various uses of the same partition.
   struct PartitionUse : public ByteRange {
     /// \brief The use in question. Provides access to both user and used value.
-    Use* U;
+    ///
+    /// Note that this may be null if the partition use is *dead*, that is, it
+    /// should be ignored.
+    Use *U;
 
     PartitionUse() : ByteRange(), U() {}
     PartitionUse(uint64_t BeginOffset, uint64_t EndOffset, Use *U)
@@ -220,10 +223,6 @@
   void use_push_back(const_iterator I, const PartitionUse &PU) {
     Uses[I - begin()].push_back(PU);
   }
-  void use_erase(unsigned Idx, use_iterator UI) { Uses[Idx].erase(UI); }
-  void use_erase(const_iterator I, use_iterator UI) {
-    Uses[I - begin()].erase(UI);
-  }
   /// @}
 
   /// \brief Allow iterating the dead users for this alloca.
@@ -1112,6 +1111,8 @@
 Type *AllocaPartitioning::getCommonType(iterator I) const {
   Type *Ty = 0;
   for (const_use_iterator UI = use_begin(I), UE = use_end(I); UI != UE; ++UI) {
+    if (!UI->U)
+      continue; // Skip dead uses.
     if (isa<IntrinsicInst>(*UI->U->getUser()))
       continue;
     if (UI->BeginOffset != I->BeginOffset || UI->EndOffset != I->EndOffset)
@@ -1147,6 +1148,8 @@
                                     StringRef Indent) const {
   for (const_use_iterator UI = use_begin(I), UE = use_end(I);
        UI != UE; ++UI) {
+    if (!UI->U)
+      continue; // Skip dead uses.
     OS << Indent << "  [" << UI->BeginOffset << "," << UI->EndOffset << ") "
        << "used by: " << *UI->U->getUser() << "\n";
     if (MemTransferInst *II = dyn_cast<MemTransferInst>(UI->U->getUser())) {
@@ -1685,6 +1688,9 @@
   ElementSize /= 8;
 
   for (; I != E; ++I) {
+    if (!I->U)
+      continue; // Skip dead use.
+
     uint64_t BeginOffset = I->BeginOffset - PartitionBeginOffset;
     uint64_t BeginIndex = BeginOffset / ElementSize;
     if (BeginIndex * ElementSize != BeginOffset ||
@@ -1744,6 +1750,8 @@
   // splittable later) and lose the ability to promote each element access.
   bool WholeAllocaOp = false;
   for (; I != E; ++I) {
+    if (!I->U)
+      continue; // Skip dead use.
     if (LoadInst *LI = dyn_cast<LoadInst>(I->U->getUser())) {
       if (LI->isVolatile() || !LI->getType()->isIntegerTy())
         return false;
@@ -1789,8 +1797,13 @@
     // Note that we need to use an index here as the underlying vector of uses
     // may be grown during speculation. However, we never need to re-visit the
     // new uses, and so we can use the initial size bound.
-    for (unsigned Idx = 0, Size = P.use_size(PI); Idx != Size; ++Idx)
-      visit(cast<Instruction>(P.getUse(PI, Idx).U->getUser()));
+    for (unsigned Idx = 0, Size = P.use_size(PI); Idx != Size; ++Idx) {
+      const AllocaPartitioning::PartitionUse &PU = P.getUse(PI, Idx);
+      if (!PU.U)
+        continue; // Skip dead use.
+
+      visit(cast<Instruction>(PU.U->getUser()));
+    }
   }
 
 private:
@@ -2000,7 +2013,9 @@
         AllocaPartitioning::use_iterator UI
           = P.findPartitionUseForPHIOrSelectOperand(Ops[i]);
         PUs[i] = *UI;
-        P.use_erase(PIs[i], UI);
+        // Clear out the use here so that the offsets into the use list remain
+        // stable but this use is ignored when rewriting.
+        UI->U = 0;
       }
     }
 
@@ -2120,6 +2135,8 @@
     }
     bool CanSROA = true;
     for (; I != E; ++I) {
+      if (!I->U)
+        continue; // Skip dead uses.
       BeginOffset = I->BeginOffset;
       EndOffset = I->EndOffset;
       OldUse = I->U;
@@ -2994,7 +3011,13 @@
                                   AllocaPartitioning &P,
                                   AllocaPartitioning::iterator PI) {
   uint64_t AllocaSize = PI->EndOffset - PI->BeginOffset;
-  if (P.use_begin(PI) == P.use_end(PI))
+  bool IsLive = false;
+  for (AllocaPartitioning::use_iterator UI = P.use_begin(PI),
+                                        UE = P.use_end(PI);
+       UI != UE && !IsLive; ++UI)
+    if (UI->U)
+      IsLive = true;
+  if (!IsLive)
     return false; // No live uses left of this partition.
 
   DEBUG(dbgs() << "Speculating PHIs and selects in partition "