[cfe-commits] r113836 - in /cfe/trunk: lib/CodeGen/CGException.h lib/CodeGen/CGExprCXX.cpp lib/CodeGen/CodeGenFunction.cpp lib/CodeGen/CodeGenFunction.h test/CodeGenCXX/exceptions.cpp

John McCall rjmccall at apple.com
Tue Sep 14 00:57:04 PDT 2010


Author: rjmccall
Date: Tue Sep 14 02:57:04 2010
New Revision: 113836

URL: http://llvm.org/viewvc/llvm-project?rev=113836&view=rev
Log:
Implement the EH cleanup to call 'operator delete' if a new-expression throws
(but not if destructors associated with the full-expression throw).
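
A note on the language rule being implemented: if the initialization of an
object created by a new-expression exits via an exception, the matching
deallocation function (when one can be found) is called to release the
storage before the exception propagates.  A minimal standalone illustration
(not part of this patch):

  struct A {
    A() { throw 0; }     // initialization throws
  };

  void test() {
    try {
      A *a = new A;      // operator new succeeds, A::A() throws, and
                         // ::operator delete is called on the allocation
                         // before the exception escapes
    } catch (int) {}
  }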


Modified:
    cfe/trunk/lib/CodeGen/CGException.h
    cfe/trunk/lib/CodeGen/CGExprCXX.cpp
    cfe/trunk/lib/CodeGen/CodeGenFunction.cpp
    cfe/trunk/lib/CodeGen/CodeGenFunction.h
    cfe/trunk/test/CodeGenCXX/exceptions.cpp

Modified: cfe/trunk/lib/CodeGen/CGException.h
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/CodeGen/CGException.h?rev=113836&r1=113835&r2=113836&view=diff
==============================================================================
--- cfe/trunk/lib/CodeGen/CGException.h (original)
+++ cfe/trunk/lib/CodeGen/CGException.h Tue Sep 14 02:57:04 2010
@@ -160,11 +160,14 @@
   /// Whether this cleanup needs to be run along exception edges.
   bool IsEHCleanup : 1;
 
-  /// Whether this cleanup was activated before all normal uses.
-  bool ActivatedBeforeNormalUse : 1;
+  /// Whether this cleanup is currently active.
+  bool IsActive : 1;
 
-  /// Whether this cleanup was activated before all EH uses.
-  bool ActivatedBeforeEHUse : 1;
+  /// Whether the normal cleanup should test the activation flag.
+  bool TestFlagInNormalCleanup : 1;
+
+  /// Whether the EH cleanup should test the activation flag.
+  bool TestFlagInEHCleanup : 1;
 
   /// The amount of extra storage needed by the Cleanup.
   /// Always a multiple of the scope-stack alignment.
@@ -173,7 +176,7 @@
   /// The number of fixups required by enclosing scopes (not including
   /// this one).  If this is the top cleanup scope, all the fixups
   /// from this index onwards belong to this scope.
-  unsigned FixupDepth : BitsRemaining - 16;
+  unsigned FixupDepth : BitsRemaining - 17; // currently 13
 
   /// The nearest normal cleanup scope enclosing this one.
   EHScopeStack::stable_iterator EnclosingNormal;
@@ -190,12 +193,8 @@
   llvm::BasicBlock *EHBlock;
 
   /// An optional i1 variable indicating whether this cleanup has been
-  /// activated yet.  This has one of three states:
-  ///   - it is null if the cleanup is inactive
-  ///   - it is activeSentinel() if the cleanup is active and was not
-  ///     required before activation
-  ///   - it points to a valid variable
-  llvm::AllocaInst *ActiveVar;
+  /// activated yet.
+  llvm::AllocaInst *ActiveFlag;
 
   /// Extra information required for cleanups that have resolved
   /// branches through them.  This has to be allocated on the side
@@ -246,14 +245,11 @@
                  EHScopeStack::stable_iterator EnclosingNormal,
                  EHScopeStack::stable_iterator EnclosingEH)
     : EHScope(EHScope::Cleanup),
-      IsNormalCleanup(IsNormal), IsEHCleanup(IsEH),
-      ActivatedBeforeNormalUse(IsActive),
-      ActivatedBeforeEHUse(IsActive),
+      IsNormalCleanup(IsNormal), IsEHCleanup(IsEH), IsActive(IsActive),
+      TestFlagInNormalCleanup(false), TestFlagInEHCleanup(false),
       CleanupSize(CleanupSize), FixupDepth(FixupDepth),
       EnclosingNormal(EnclosingNormal), EnclosingEH(EnclosingEH),
-      NormalBlock(0), EHBlock(0),
-      ActiveVar(IsActive ? activeSentinel() : 0),
-      ExtInfo(0)
+      NormalBlock(0), EHBlock(0), ActiveFlag(0), ExtInfo(0)
   {
     assert(this->CleanupSize == CleanupSize && "cleanup size overflow");
   }
@@ -270,19 +266,17 @@
   llvm::BasicBlock *getEHBlock() const { return EHBlock; }
   void setEHBlock(llvm::BasicBlock *BB) { EHBlock = BB; }
 
-  static llvm::AllocaInst *activeSentinel() {
-    return reinterpret_cast<llvm::AllocaInst*>(1);
-  }
+  bool isActive() const { return IsActive; }
+  void setActive(bool A) { IsActive = A; }
 
-  bool isActive() const { return ActiveVar != 0; }
-  llvm::AllocaInst *getActiveVar() const { return ActiveVar; }
-  void setActiveVar(llvm::AllocaInst *Var) { ActiveVar = Var; }
+  llvm::AllocaInst *getActiveFlag() const { return ActiveFlag; }
+  void setActiveFlag(llvm::AllocaInst *Var) { ActiveFlag = Var; }
 
-  bool wasActivatedBeforeNormalUse() const { return ActivatedBeforeNormalUse; }
-  void setActivatedBeforeNormalUse(bool B) { ActivatedBeforeNormalUse = B; }
+  void setTestFlagInNormalCleanup() { TestFlagInNormalCleanup = true; }
+  bool shouldTestFlagInNormalCleanup() const { return TestFlagInNormalCleanup; }
 
-  bool wasActivatedBeforeEHUse() const { return ActivatedBeforeEHUse; }
-  void setActivatedBeforeEHUse(bool B) { ActivatedBeforeEHUse = B; }
+  void setTestFlagInEHCleanup() { TestFlagInEHCleanup = true; }
+  bool shouldTestFlagInEHCleanup() const { return TestFlagInEHCleanup; }
 
   unsigned getFixupDepth() const { return FixupDepth; }
   EHScopeStack::stable_iterator getEnclosingNormalCleanup() const {

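For orientation: PopCleanupBlock (in the CodeGenFunction.cpp hunk below)
consumes these bits as follows, loading the activation flag only on the
paths that asked for a test:

  llvm::Value *NormalActiveFlag =
    Scope.shouldTestFlagInNormalCleanup() ? Scope.getActiveFlag() : 0;
  llvm::Value *EHActiveFlag =
    Scope.shouldTestFlagInEHCleanup() ? Scope.getActiveFlag() : 0;

EmitCleanup then branches around the cleanup body whenever the flag it is
handed loads false; a cleanup that was never conditionally used skips the
test entirely.
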
Modified: cfe/trunk/lib/CodeGen/CGExprCXX.cpp
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/CodeGen/CGExprCXX.cpp?rev=113836&r1=113835&r2=113836&view=diff
==============================================================================
--- cfe/trunk/lib/CodeGen/CGExprCXX.cpp (original)
+++ cfe/trunk/lib/CodeGen/CGExprCXX.cpp Tue Sep 14 02:57:04 2010
@@ -677,6 +677,62 @@
   StoreAnyExprIntoOneUnit(CGF, E, NewPtr);
 }
 
+namespace {
+  /// A cleanup to call the given 'operator delete' function upon
+  /// abnormal exit from a new expression.
+  class CallDeleteDuringNew : public EHScopeStack::Cleanup {
+    size_t NumPlacementArgs;
+    const FunctionDecl *OperatorDelete;
+    llvm::Value *Ptr;
+    llvm::Value *AllocSize;
+
+    RValue *getPlacementArgs() { return reinterpret_cast<RValue*>(this+1); }
+
+  public:
+    static size_t getExtraSize(size_t NumPlacementArgs) {
+      return NumPlacementArgs * sizeof(RValue);
+    }
+
+    CallDeleteDuringNew(size_t NumPlacementArgs,
+                        const FunctionDecl *OperatorDelete,
+                        llvm::Value *Ptr,
+                        llvm::Value *AllocSize) 
+      : NumPlacementArgs(NumPlacementArgs), OperatorDelete(OperatorDelete),
+        Ptr(Ptr), AllocSize(AllocSize) {}
+
+    void setPlacementArg(unsigned I, RValue Arg) {
+      assert(I < NumPlacementArgs && "index out of range");
+      getPlacementArgs()[I] = Arg;
+    }
+
+    void Emit(CodeGenFunction &CGF, bool IsForEH) {
+      const FunctionProtoType *FPT
+        = OperatorDelete->getType()->getAs<FunctionProtoType>();
+      assert(FPT->getNumArgs() == NumPlacementArgs + 1 ||
+             FPT->getNumArgs() == NumPlacementArgs + 2);
+
+      CallArgList DeleteArgs;
+
+      // The first argument is always a void*.
+      FunctionProtoType::arg_type_iterator AI = FPT->arg_type_begin();
+      DeleteArgs.push_back(std::make_pair(RValue::get(Ptr), *AI++));
+
+      // A member 'operator delete' can take an extra 'size_t' argument.
+      if (FPT->getNumArgs() == NumPlacementArgs + 2)
+        DeleteArgs.push_back(std::make_pair(RValue::get(AllocSize), *AI++));
+
+      // Pass the rest of the arguments, which must match exactly.
+      for (unsigned I = 0; I != NumPlacementArgs; ++I)
+        DeleteArgs.push_back(std::make_pair(getPlacementArgs()[I], *AI++));
+
+      // Call 'operator delete'.
+      CGF.EmitCall(CGF.CGM.getTypes().getFunctionInfo(DeleteArgs, FPT),
+                   CGF.CGM.GetAddrOfFunction(OperatorDelete),
+                   ReturnValueSlot(), DeleteArgs, OperatorDelete);
+    }
+  };
+}
+
 llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
   QualType AllocType = E->getAllocatedType();
   if (AllocType->isArrayType())
@@ -769,9 +825,24 @@
                                                    AllocType);
   }
 
+  // If there's an operator delete, enter a cleanup to call it if an
+  // exception is thrown.
+  EHScopeStack::stable_iterator CallOperatorDelete;
+  if (E->getOperatorDelete()) {
+    CallDeleteDuringNew *Cleanup = CGF.EHStack
+      .pushCleanupWithExtra<CallDeleteDuringNew>(EHCleanup,
+                                                 E->getNumPlacementArgs(),
+                                                 E->getOperatorDelete(),
+                                                 NewPtr, AllocSize);
+    for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I)
+      Cleanup->setPlacementArg(I, NewArgs[I+1].first);
+    CallOperatorDelete = EHStack.stable_begin();
+  }
+
   const llvm::Type *ElementPtrTy
     = ConvertTypeForMem(AllocType)->getPointerTo(AS);
   NewPtr = Builder.CreateBitCast(NewPtr, ElementPtrTy);
+
   if (E->isArray()) {
     EmitNewInitializer(*this, E, NewPtr, NumElements, AllocSizeWithoutCookie);
 
@@ -784,6 +855,11 @@
   } else {
     EmitNewInitializer(*this, E, NewPtr, NumElements, AllocSizeWithoutCookie);
   }
+
+  // Deactivate the 'operator delete' cleanup if we finished
+  // initialization.
+  if (CallOperatorDelete.isValid())
+    DeactivateCleanupBlock(CallOperatorDelete);
   
   if (NullCheckResult) {
     Builder.CreateBr(NewEnd);

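One detail worth calling out: CallDeleteDuringNew keeps its placement
arguments in trailing storage after the object rather than in a separate
allocation, which is why it is pushed with pushCleanupWithExtra.  A
self-contained sketch of that idiom (the names here are illustrative, not
from the patch):

  #include <cstddef>
  #include <new>

  struct VarSized {
    size_t NumExtra;
    explicit VarSized(size_t N) : NumExtra(N) {}

    // The extra storage lives immediately after the object itself.
    int *extra() { return reinterpret_cast<int*>(this + 1); }

    static size_t getExtraSize(size_t N) { return N * sizeof(int); }
  };

  VarSized *makeVarSized(size_t N) {
    void *Buffer =
      ::operator new(sizeof(VarSized) + VarSized::getExtraSize(N));
    return new (Buffer) VarSized(N);
  }
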
Modified: cfe/trunk/lib/CodeGen/CodeGenFunction.cpp
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/CodeGen/CodeGenFunction.cpp?rev=113836&r1=113835&r2=113836&view=diff
==============================================================================
--- cfe/trunk/lib/CodeGen/CodeGenFunction.cpp (original)
+++ cfe/trunk/lib/CodeGen/CodeGenFunction.cpp Tue Sep 14 02:57:04 2010
@@ -759,11 +759,51 @@
 
 static void EmitCleanup(CodeGenFunction &CGF,
                         EHScopeStack::Cleanup *Fn,
-                        bool ForEH) {
+                        bool ForEH,
+                        llvm::Value *ActiveFlag) {
+  // EH cleanups always occur within a terminate scope.
   if (ForEH) CGF.EHStack.pushTerminate();
+
+  // If there's an active flag, load it and skip the cleanup if it's
+  // false.
+  llvm::BasicBlock *ContBB = 0;
+  if (ActiveFlag) {
+    ContBB = CGF.createBasicBlock("cleanup.done");
+    llvm::BasicBlock *CleanupBB = CGF.createBasicBlock("cleanup.action");
+    llvm::Value *IsActive
+      = CGF.Builder.CreateLoad(ActiveFlag, "cleanup.is_active");
+    CGF.Builder.CreateCondBr(IsActive, CleanupBB, ContBB);
+    CGF.EmitBlock(CleanupBB);
+  }
+
+  // Ask the cleanup to emit itself.
   Fn->Emit(CGF, ForEH);
-  if (ForEH) CGF.EHStack.popTerminate();
   assert(CGF.HaveInsertPoint() && "cleanup ended with no insertion point?");
+
+  // Emit the continuation block if there was an active flag.
+  if (ActiveFlag)
+    CGF.EmitBlock(ContBB);
+
+  // Leave the terminate scope.
+  if (ForEH) CGF.EHStack.popTerminate();
+}
+
+static void ForwardPrebranchedFallthrough(llvm::BasicBlock *Exit,
+                                          llvm::BasicBlock *From,
+                                          llvm::BasicBlock *To) {
+  // Exit is the exit block of a cleanup, so it always terminates in
+  // an unconditional branch or a switch.
+  llvm::TerminatorInst *Term = Exit->getTerminator();
+
+  if (llvm::BranchInst *Br = dyn_cast<llvm::BranchInst>(Term)) {
+    assert(Br->isUnconditional() && Br->getSuccessor(0) == From);
+    Br->setSuccessor(0, To);
+  } else {
+    llvm::SwitchInst *Switch = cast<llvm::SwitchInst>(Term);
+    for (unsigned I = 0, E = Switch->getNumSuccessors(); I != E; ++I)
+      if (Switch->getSuccessor(I) == From)
+        Switch->setSuccessor(I, To);
+  }
 }
 
 /// Pops a cleanup block.  If the block includes a normal cleanup, the
@@ -774,7 +814,13 @@
   assert(isa<EHCleanupScope>(*EHStack.begin()) && "top not a cleanup!");
   EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.begin());
   assert(Scope.getFixupDepth() <= EHStack.getNumBranchFixups());
-  assert(Scope.isActive() && "cleanup was still inactive when popped!");
+
+  // Remember activation information.
+  bool IsActive = Scope.isActive();
+  llvm::Value *NormalActiveFlag =
+    Scope.shouldTestFlagInNormalCleanup() ? Scope.getActiveFlag() : 0;
+  llvm::Value *EHActiveFlag = 
+    Scope.shouldTestFlagInEHCleanup() ? Scope.getActiveFlag() : 0;
 
   // Check whether we need an EH cleanup.  This is only true if we've
   // generated a lazy EH cleanup block.
@@ -791,7 +837,12 @@
 
   // - whether there's a fallthrough
   llvm::BasicBlock *FallthroughSource = Builder.GetInsertBlock();
-  bool HasFallthrough = (FallthroughSource != 0);
+  bool HasFallthrough = (FallthroughSource != 0 && IsActive);
+
+  // As a kind of crazy internal case, branch-through fall-throughs
+  // leave the insertion point set to the end of the last cleanup.
+  bool HasPrebranchedFallthrough =
+    (FallthroughSource && FallthroughSource->getTerminator());
 
   bool RequiresNormalCleanup = false;
   if (Scope.isNormalCleanup() &&
@@ -799,6 +850,40 @@
     RequiresNormalCleanup = true;
   }
 
+  assert(!HasPrebranchedFallthrough || RequiresNormalCleanup || !IsActive);
+  assert(!HasPrebranchedFallthrough ||
+         (Scope.isNormalCleanup() && Scope.getNormalBlock() &&
+          FallthroughSource->getTerminator()->getSuccessor(0)
+            == Scope.getNormalBlock()));
+
+  // Even if we don't need the normal cleanup, we might still have
+  // prebranched fallthrough to worry about.
+  if (!RequiresNormalCleanup && HasPrebranchedFallthrough) {
+    assert(!IsActive);
+
+    llvm::BasicBlock *NormalEntry = Scope.getNormalBlock();
+
+    // If we're branching through this cleanup, just forward the
+    // prebranched fallthrough to the next cleanup, leaving the insert
+    // point in the old block.
+    if (FallthroughIsBranchThrough) {
+      EHScope &S = *EHStack.find(Scope.getEnclosingNormalCleanup());
+      llvm::BasicBlock *EnclosingEntry = 
+        CreateNormalEntry(*this, cast<EHCleanupScope>(S));
+
+      ForwardPrebranchedFallthrough(FallthroughSource,
+                                    NormalEntry, EnclosingEntry);
+      assert(NormalEntry->use_empty() &&
+             "uses of entry remain after forwarding?");
+      delete NormalEntry;
+
+    // Otherwise, we're branching out;  just emit the next block.
+    } else {
+      EmitBlock(NormalEntry);
+      SimplifyCleanupEntry(*this, NormalEntry);
+    }
+  }
+
   // If we don't need the cleanup at all, we're done.
   if (!RequiresNormalCleanup && !RequiresEHCleanup) {
     EHStack.popCleanup(); // safe because there are no fixups
@@ -877,14 +962,6 @@
   if (!RequiresNormalCleanup) {
     EHStack.popCleanup();
   } else {
-    // As a kindof crazy internal case, branch-through fall-throughs
-    // leave the insertion point set to the end of the last cleanup.
-    bool HasPrebranchedFallthrough =
-      (HasFallthrough && FallthroughSource->getTerminator());
-    assert(!HasPrebranchedFallthrough ||
-           FallthroughSource->getTerminator()->getSuccessor(0)
-             == Scope.getNormalBlock());
-
     // If we have a fallthrough and no other need for the cleanup,
     // emit it directly.
     if (HasFallthrough && !HasPrebranchedFallthrough &&
@@ -901,7 +978,7 @@
 
       EHStack.popCleanup();
 
-      EmitCleanup(*this, Fn, /*ForEH*/ false);
+      EmitCleanup(*this, Fn, /*ForEH*/ false, NormalActiveFlag);
 
     // Otherwise, the best approach is to thread everything through
     // the cleanup block and then try to clean up after ourselves.
@@ -909,16 +986,30 @@
       // Force the entry block to exist.
       llvm::BasicBlock *NormalEntry = CreateNormalEntry(*this, Scope);
 
+      // I.  Set up the fallthrough edge in.
+
       // If there's a fallthrough, we need to store the cleanup
       // destination index.  For fall-throughs this is always zero.
-      if (HasFallthrough && !HasPrebranchedFallthrough)
-        Builder.CreateStore(Builder.getInt32(0), getNormalCleanupDestSlot());
+      if (HasFallthrough) {
+        if (!HasPrebranchedFallthrough)
+          Builder.CreateStore(Builder.getInt32(0), getNormalCleanupDestSlot());
+
+      // Otherwise, clear the IP if we don't have fallthrough because
+      // the cleanup is inactive.  We don't need to save it because
+      // it's still just FallthroughSource.
+      } else if (FallthroughSource) {
+        assert(!IsActive && "source without fallthrough for active cleanup");
+        Builder.ClearInsertionPoint();
+      }
 
-      // Emit the entry block.  This implicitly branches to it if we
-      // have fallthrough.  All the fixups and existing branches should
-      // already be branched to it.
+      // II.  Emit the entry block.  This implicitly branches to it if
+      // we have fallthrough.  All the fixups and existing branches
+      // should already be branched to it.
       EmitBlock(NormalEntry);
 
+      // III.  Figure out where we're going and build the cleanup
+      // epilogue.
+
       bool HasEnclosingCleanups =
         (Scope.getEnclosingNormalCleanup() != EHStack.stable_end());
 
@@ -929,7 +1020,7 @@
       //     to the enclosing cleanup
       llvm::BasicBlock *BranchThroughDest = 0;
       if (Scope.hasBranchThroughs() ||
-          (HasFallthrough && FallthroughIsBranchThrough) ||
+          (FallthroughSource && FallthroughIsBranchThrough) ||
           (HasFixups && HasEnclosingCleanups)) {
         assert(HasEnclosingCleanups);
         EHScope &S = *EHStack.find(Scope.getEnclosingNormalCleanup());
@@ -943,7 +1034,7 @@
       // we can route it without a switch.
       if (!Scope.hasBranchThroughs() && !HasFixups && !HasFallthrough &&
           Scope.getNumBranchAfters() == 1) {
-        assert(!BranchThroughDest);
+        assert(!BranchThroughDest || !IsActive);
 
         // TODO: clean up the possibly dead stores to the cleanup dest slot.
         llvm::BasicBlock *BranchAfter = Scope.getBranchAfterBlock(0);
@@ -973,9 +1064,10 @@
         InstsToAppend.push_back(Switch);
 
         // Branch-after fallthrough.
-        if (HasFallthrough && !FallthroughIsBranchThrough) {
+        if (FallthroughSource && !FallthroughIsBranchThrough) {
           FallthroughDest = createBasicBlock("cleanup.cont");
-          Switch->addCase(Builder.getInt32(0), FallthroughDest);
+          if (HasFallthrough)
+            Switch->addCase(Builder.getInt32(0), FallthroughDest);
         }
 
         for (unsigned I = 0, E = Scope.getNumBranchAfters(); I != E; ++I) {
@@ -991,11 +1083,11 @@
         InstsToAppend.push_back(llvm::BranchInst::Create(BranchThroughDest));
       }
 
-      // We're finally ready to pop the cleanup.
+      // IV.  Pop the cleanup and emit it.
       EHStack.popCleanup();
       assert(EHStack.hasNormalCleanups() == HasEnclosingCleanups);
 
-      EmitCleanup(*this, Fn, /*ForEH*/ false);
+      EmitCleanup(*this, Fn, /*ForEH*/ false, NormalActiveFlag);
 
       // Append the prepared cleanup prologue from above.
       llvm::BasicBlock *NormalExit = Builder.GetInsertBlock();
@@ -1015,11 +1107,47 @@
         }
         Fixup.OptimisticBranchBlock = NormalExit;
       }
+
+      // V.  Set up the fallthrough edge out.
       
-      if (FallthroughDest)
+      // Case 1: a fallthrough source exists but shouldn't branch to
+      // the cleanup because the cleanup is inactive.
+      if (!HasFallthrough && FallthroughSource) {
+        assert(!IsActive);
+
+        // If we have a prebranched fallthrough, that needs to be
+        // forwarded to the right block.
+        if (HasPrebranchedFallthrough) {
+          llvm::BasicBlock *Next;
+          if (FallthroughIsBranchThrough) {
+            Next = BranchThroughDest;
+            assert(!FallthroughDest);
+          } else {
+            Next = FallthroughDest;
+          }
+
+          ForwardPrebranchedFallthrough(FallthroughSource, NormalEntry, Next);
+        }
+        Builder.SetInsertPoint(FallthroughSource);
+
+      // Case 2: a fallthrough source exists and should branch to the
+      // cleanup, but we're not supposed to branch through to the next
+      // cleanup.
+      } else if (HasFallthrough && FallthroughDest) {
+        assert(!FallthroughIsBranchThrough);
         EmitBlock(FallthroughDest);
-      else if (!HasFallthrough)
+
+      // Case 3: a fallthrough source exists and should branch to the
+      // cleanup and then through to the next.
+      } else if (HasFallthrough) {
+        // Everything is already set up for this.
+
+      // Case 4: no fallthrough source exists.
+      } else {
         Builder.ClearInsertionPoint();
+      }
+
+      // VI.  Assorted cleaning.
 
       // Check whether we can merge NormalEntry into a single predecessor.
       // This might invalidate (non-IR) pointers to NormalEntry.
@@ -1042,7 +1170,7 @@
     CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP();
 
     EmitBlock(EHEntry);
-    EmitCleanup(*this, Fn, /*ForEH*/ true);
+    EmitCleanup(*this, Fn, /*ForEH*/ true, EHActiveFlag);
 
     // Append the prepared cleanup prologue from above.
     llvm::BasicBlock *EHExit = Builder.GetInsertBlock();
@@ -1252,71 +1380,121 @@
     EHStack.popNullFixups();
 }
 
-/// Activate a cleanup that was created in an inactivated state.
-void CodeGenFunction::ActivateCleanup(EHScopeStack::stable_iterator C) {
-  assert(C != EHStack.stable_end() && "activating bottom of stack?");
-  EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.find(C));
-  assert(!Scope.isActive() && "double activation");
+static bool IsUsedAsNormalCleanup(EHScopeStack &EHStack,
+                                  EHScopeStack::stable_iterator C) {
+  // If we needed a normal block for any reason, that counts.
+  if (cast<EHCleanupScope>(*EHStack.find(C)).getNormalBlock())
+    return true;
+
+  // Check whether any enclosed cleanups were needed.
+  for (EHScopeStack::stable_iterator
+         I = EHStack.getInnermostNormalCleanup();
+         I != C; ) {
+    assert(C.strictlyEncloses(I));
+    EHCleanupScope &S = cast<EHCleanupScope>(*EHStack.find(I));
+    if (S.getNormalBlock()) return true;
+    I = S.getEnclosingNormalCleanup();
+  }
+
+  return false;
+}
+
+static bool IsUsedAsEHCleanup(EHScopeStack &EHStack,
+                              EHScopeStack::stable_iterator C) {
+  // If we needed an EH block for any reason, that counts.
+  if (cast<EHCleanupScope>(*EHStack.find(C)).getEHBlock())
+    return true;
+
+  // Check whether any enclosed cleanups were needed.
+  for (EHScopeStack::stable_iterator
+         I = EHStack.getInnermostEHCleanup(); I != C; ) {
+    assert(C.strictlyEncloses(I));
+    EHCleanupScope &S = cast<EHCleanupScope>(*EHStack.find(I));
+    if (S.getEHBlock()) return true;
+    I = S.getEnclosingEHCleanup();
+  }
+
+  return false;
+}
+
+enum ForActivation_t {
+  ForActivation,
+  ForDeactivation
+};
+
+/// The given cleanup block is changing activation state.  Configure a
+/// cleanup variable if necessary.
+///
+/// It would be good if we had some way of determining if there were
+/// extra uses *after* the change-over point.
+static void SetupCleanupBlockActivation(CodeGenFunction &CGF,
+                                        EHScopeStack::stable_iterator C,
+                                        ForActivation_t Kind) {
+  EHCleanupScope &Scope = cast<EHCleanupScope>(*CGF.EHStack.find(C));
+  assert(!Scope.getActiveFlag() && "scope already has activation flag");
+
+  bool NeedFlag = false;
 
   // Calculate whether the cleanup was used:
-  bool Used = false;
 
   //   - as a normal cleanup
-  if (Scope.isNormalCleanup()) {
-    bool NormalUsed = false;
-    if (Scope.getNormalBlock()) {
-      NormalUsed = true;
-    } else {
-      // Check whether any enclosed cleanups were needed.
-      for (EHScopeStack::stable_iterator
-             I = EHStack.getInnermostNormalCleanup(); I != C; ) {
-        assert(C.strictlyEncloses(I));
-        EHCleanupScope &S = cast<EHCleanupScope>(*EHStack.find(I));
-        if (S.getNormalBlock()) {
-          NormalUsed = true;
-          break;
-        }
-        I = S.getEnclosingNormalCleanup();
-      }
-    }
-
-    if (NormalUsed)
-      Used = true;
-    else
-      Scope.setActivatedBeforeNormalUse(true);
+  if (Scope.isNormalCleanup() && IsUsedAsNormalCleanup(CGF.EHStack, C)) {
+    Scope.setTestFlagInNormalCleanup();
+    NeedFlag = true;
   }
 
   //  - as an EH cleanup
-  if (Scope.isEHCleanup()) {
-    bool EHUsed = false;
-    if (Scope.getEHBlock()) {
-      EHUsed = true;
-    } else {
-      // Check whether any enclosed cleanups were needed.
-      for (EHScopeStack::stable_iterator
-             I = EHStack.getInnermostEHCleanup(); I != C; ) {
-        assert(C.strictlyEncloses(I));
-        EHCleanupScope &S = cast<EHCleanupScope>(*EHStack.find(I));
-        if (S.getEHBlock()) {
-          EHUsed = true;
-          break;
-        }
-        I = S.getEnclosingEHCleanup();
-      }
-    }
+  if (Scope.isEHCleanup() && IsUsedAsEHCleanup(CGF.EHStack, C)) {
+    Scope.setTestFlagInEHCleanup();
+    NeedFlag = true;
+  }
+
+  // If it hasn't yet been used as either, we're done.
+  if (!NeedFlag) return;
 
-    if (EHUsed)
-      Used = true;
-    else
-      Scope.setActivatedBeforeEHUse(true);
+  llvm::AllocaInst *Var = CGF.CreateTempAlloca(CGF.Builder.getInt1Ty());
+  Scope.setActiveFlag(Var);
+
+  if (Kind == ForActivation) {
+    CGF.InitTempAlloca(Var, CGF.Builder.getFalse());
+    CGF.Builder.CreateStore(CGF.Builder.getTrue(), Var);
+  } else {
+    CGF.InitTempAlloca(Var, CGF.Builder.getTrue());
+    CGF.Builder.CreateStore(CGF.Builder.getFalse(), Var);
   }
-  
-  llvm::AllocaInst *Var = EHCleanupScope::activeSentinel();
-  if (Used) {
-    Var = CreateTempAlloca(Builder.getInt1Ty());
-    InitTempAlloca(Var, Builder.getFalse());
+}
+
+/// Activate a cleanup that was created in an inactive state.
+void CodeGenFunction::ActivateCleanupBlock(EHScopeStack::stable_iterator C) {
+  assert(C != EHStack.stable_end() && "activating bottom of stack?");
+  EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.find(C));
+  assert(!Scope.isActive() && "double activation");
+
+  SetupCleanupBlockActivation(*this, C, ForActivation);
+
+  Scope.setActive(true);
+}
+
+/// Deactivate a cleanup that was created in an active state.
+void CodeGenFunction::DeactivateCleanupBlock(EHScopeStack::stable_iterator C) {
+  assert(C != EHStack.stable_end() && "deactivating bottom of stack?");
+  EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.find(C));
+  assert(Scope.isActive() && "double deactivation");
+
+  // If it's the top of the stack, just pop it.
+  if (C == EHStack.stable_begin()) {
+    // If it's a normal cleanup, we need to pretend that the
+    // fallthrough is unreachable.
+    CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP();
+    PopCleanupBlock();
+    Builder.restoreIP(SavedIP);
+    return;
   }
-  Scope.setActiveVar(Var);
+
+  // Otherwise, follow the general case.
+  SetupCleanupBlockActivation(*this, C, ForDeactivation);
+
+  Scope.setActive(false);
 }
 
 llvm::Value *CodeGenFunction::getNormalCleanupDestSlot() {

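For a cleanup whose activation flag must be tested, the IR emitted by
EmitCleanup has the following shape (a sketch assembled from the block and
value names above; the name of the flag alloca is illustrative):

  %cleanup.is_active = load i1* %cleanup.active.flag
  br i1 %cleanup.is_active, label %cleanup.action, label %cleanup.done

  cleanup.action:                     ; the cleanup body runs here,
    ...                               ; e.g. the call to 'operator delete'
    br label %cleanup.done

  cleanup.done:                       ; control rejoins here whether or
    ...                               ; not the cleanup actually ran
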
Modified: cfe/trunk/lib/CodeGen/CodeGenFunction.h
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/CodeGen/CodeGenFunction.h?rev=113836&r1=113835&r2=113836&view=diff
==============================================================================
--- cfe/trunk/lib/CodeGen/CodeGenFunction.h (original)
+++ cfe/trunk/lib/CodeGen/CodeGenFunction.h Tue Sep 14 02:57:04 2010
@@ -285,6 +285,25 @@
     (void) Obj;
   }
 
+  // Feel free to add more variants of the following:
+
+  /// Push a cleanup with non-constant storage requirements on the
+  /// stack.  The cleanup type must provide an additional static method:
+  ///   static size_t getExtraSize(size_t);
+  /// The argument to this method will be the value N, which will also
+  /// be passed as the first argument to the constructor.
+  ///
+  /// The data stored in the extra storage must obey the same
+  /// restrictions as normal cleanup member data.
+  ///
+  /// The pointer returned from this method is valid until the cleanup
+  /// stack is modified.
+  template <class T, class A0, class A1, class A2>
+  T *pushCleanupWithExtra(CleanupKind Kind, size_t N, A0 a0, A1 a1, A2 a2) {
+    void *Buffer = pushCleanup(Kind, sizeof(T) + T::getExtraSize(N));
+    return new (Buffer) T(N, a0, a1, a2);
+  }
+
   /// Pops a cleanup scope off the stack.  This should only be called
   /// by CodeGenFunction::PopCleanupBlock.
   void popCleanup();
@@ -542,7 +561,14 @@
   /// process all branch fixups.
   void PopCleanupBlock(bool FallThroughIsBranchThrough = false);
 
-  void ActivateCleanup(EHScopeStack::stable_iterator Cleanup);
+  /// DeactivateCleanupBlock - Deactivates the given cleanup block.
+  /// The block cannot be reactivated.  Pops it if it's the top of the
+  /// stack.
+  void DeactivateCleanupBlock(EHScopeStack::stable_iterator Cleanup);
+
+  /// ActivateCleanupBlock - Activates an initially-inactive cleanup.
+  /// Cannot be used to resurrect a deactivated cleanup.
+  void ActivateCleanupBlock(EHScopeStack::stable_iterator Cleanup);
 
   /// \brief Enters a new scope for capturing cleanups, all of which
   /// will be executed once the scope is exited.

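The intended usage pattern, as in EmitCXXNewExpr above: push the cleanup in
an active state before emitting the initialization, then deactivate it once
initialization completes, so that 'operator delete' runs only on the
exceptional path.  In outline:

  EHScopeStack::stable_iterator CallOperatorDelete;
  if (E->getOperatorDelete()) {
    EHStack.pushCleanupWithExtra<CallDeleteDuringNew>(EHCleanup, ...);
    CallOperatorDelete = EHStack.stable_begin();
  }

  EmitNewInitializer(...);            // the part that may throw

  if (CallOperatorDelete.isValid())
    DeactivateCleanupBlock(CallOperatorDelete);  // normal path: no delete
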
Modified: cfe/trunk/test/CodeGenCXX/exceptions.cpp
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGenCXX/exceptions.cpp?rev=113836&r1=113835&r2=113836&view=diff
==============================================================================
--- cfe/trunk/test/CodeGenCXX/exceptions.cpp (original)
+++ cfe/trunk/test/CodeGenCXX/exceptions.cpp Tue Sep 14 02:57:04 2010
@@ -1,18 +1,210 @@
-// RUN: %clang_cc1 %s -triple=x86_64-apple-darwin10 -emit-llvm -o - -fexceptions
+// RUN: %clang_cc1 %s -triple=x86_64-apple-darwin10 -emit-llvm -o - -fexceptions | FileCheck %s
 
-struct allocator {
-  allocator();
-  allocator(const allocator&);
-  ~allocator();
-};
-
-void f();
-void g(bool b, bool c) {
-  if (b) {
-    if (!c)
-    throw allocator();
+typedef typeof(sizeof(0)) size_t;
 
-    return;
+// This just shouldn't crash.
+namespace test0 {
+  struct allocator {
+    allocator();
+    allocator(const allocator&);
+    ~allocator();
+  };
+
+  void f();
+  void g(bool b, bool c) {
+    if (b) {
+      if (!c)
+        throw allocator();
+
+      return;
+    }
+    f();
+  }
+}
+
+namespace test1 {
+  struct A { A(int); A(int, int); ~A(); void *p; };
+
+  A *a() {
+    // CHECK:    define [[A:%.*]]* @_ZN5test11aEv()
+    // CHECK:      [[NEW:%.*]] = call noalias i8* @_Znwm(i64 8)
+    // CHECK-NEXT: [[CAST:%.*]] = bitcast i8* [[NEW]] to [[A]]*
+    // CHECK-NEXT: invoke void @_ZN5test11AC1Ei([[A]]* [[CAST]], i32 5)
+    // CHECK:      ret [[A]]* [[CAST]]
+    // CHECK:      call void @_ZdlPv(i8* [[NEW]])
+    return new A(5);
+  }
+
+  A *b() {
+    // CHECK:    define [[A:%.*]]* @_ZN5test11bEv()
+    // CHECK:      [[NEW:%.*]] = call noalias i8* @_Znwm(i64 8)
+    // CHECK-NEXT: [[CAST:%.*]] = bitcast i8* [[NEW]] to [[A]]*
+    // CHECK-NEXT: [[FOO:%.*]] = invoke i32 @_ZN5test13fooEv()
+    // CHECK:      invoke void @_ZN5test11AC1Ei([[A]]* [[CAST]], i32 [[FOO]])
+    // CHECK:      ret [[A]]* [[CAST]]
+    // CHECK:      call void @_ZdlPv(i8* [[NEW]])
+    extern int foo();
+    return new A(foo());
+  }
+
+  struct B { B(); ~B(); operator int(); int x; };
+  B makeB();
+
+  A *c() {
+    // CHECK:    define [[A:%.*]]* @_ZN5test11cEv()
+    // CHECK:      [[ACTIVE:%.*]] = alloca i1
+    // CHECK-NEXT: store i1 true, i1* [[ACTIVE]] 
+    // CHECK-NEXT: [[NEW:%.*]] = call noalias i8* @_Znwm(i64 8)
+    // CHECK-NEXT: [[CAST:%.*]] = bitcast i8* [[NEW]] to [[A]]*
+    // CHECK-NEXT: invoke void @_ZN5test11BC1Ev([[B:%.*]]* [[T0:%.*]])
+    // CHECK:      [[T1:%.*]] = getelementptr inbounds [[B]]* [[T0]], i32 0, i32 0
+    // CHECK-NEXT: [[T2:%.*]] = load i32* [[T1]], align 4
+    // CHECK-NEXT: invoke void @_ZN5test11AC1Ei([[A]]* [[CAST]], i32 [[T2]])
+    // CHECK:      store i1 false, i1* [[ACTIVE]]
+    // CHECK-NEXT: invoke void @_ZN5test11BD1Ev([[B]]* [[T0]])
+    // CHECK:      ret [[A]]* [[CAST]]
+    // CHECK:      [[ISACTIVE:%.*]] = load i1* [[ACTIVE]]
+    // CHECK-NEXT: br i1 [[ISACTIVE]]
+    // CHECK:      call void @_ZdlPv(i8* [[NEW]])
+    return new A(B().x);
+  }
+
+  A *d() {
+    // CHECK:    define [[A:%.*]]* @_ZN5test11dEv()
+    // CHECK:      [[ACTIVE:%.*]] = alloca i1
+    // CHECK-NEXT: store i1 true, i1* [[ACTIVE]] 
+    // CHECK-NEXT: [[NEW:%.*]] = call noalias i8* @_Znwm(i64 8)
+    // CHECK-NEXT: [[CAST:%.*]] = bitcast i8* [[NEW]] to [[A]]*
+    // CHECK-NEXT: invoke void @_ZN5test11BC1Ev([[B:%.*]]* [[T0:%.*]])
+    // CHECK:      [[T1:%.*]] = invoke i32 @_ZN5test11BcviEv([[B]]* [[T0]])
+    // CHECK:      invoke void @_ZN5test11AC1Ei([[A]]* [[CAST]], i32 [[T1]])
+    // CHECK:      store i1 false, i1* [[ACTIVE]]
+    // CHECK-NEXT: invoke void @_ZN5test11BD1Ev([[B]]* [[T0]])
+    // CHECK:      ret [[A]]* [[CAST]]
+    // CHECK:      [[ISACTIVE:%.*]] = load i1* [[ACTIVE]]
+    // CHECK-NEXT: br i1 [[ISACTIVE]]
+    // CHECK:      call void @_ZdlPv(i8* [[NEW]])
+    return new A(B());
+  }
+
+  A *e() {
+    // CHECK:    define [[A:%.*]]* @_ZN5test11eEv()
+    // CHECK:      [[ACTIVE:%.*]] = alloca i1
+    // CHECK-NEXT: store i1 true, i1* [[ACTIVE]] 
+    // CHECK-NEXT: [[NEW:%.*]] = call noalias i8* @_Znwm(i64 8)
+    // CHECK-NEXT: [[CAST:%.*]] = bitcast i8* [[NEW]] to [[A]]*
+    // CHECK-NEXT: invoke void @_ZN5test11BC1Ev([[B:%.*]]* [[T0:%.*]])
+    // CHECK:      [[T1:%.*]] = invoke i32 @_ZN5test11BcviEv([[B]]* [[T0]])
+    // CHECK:      invoke void @_ZN5test11BC1Ev([[B]]* [[T2:%.*]])
+    // CHECK:      [[T3:%.*]] = invoke i32 @_ZN5test11BcviEv([[B]]* [[T2]])
+    // CHECK:      invoke void @_ZN5test11AC1Eii([[A]]* [[CAST]], i32 [[T1]], i32 [[T3]])
+    // CHECK:      store i1 false, i1* [[ACTIVE]]
+    // CHECK-NEXT: invoke void @_ZN5test11BD1Ev([[B]]* [[T2]])
+    // CHECK:      invoke void @_ZN5test11BD1Ev([[B]]* [[T0]])
+    // CHECK:      ret [[A]]* [[CAST]]
+    // CHECK:      [[ISACTIVE:%.*]] = load i1* [[ACTIVE]]
+    // CHECK-NEXT: br i1 [[ISACTIVE]]
+    // CHECK:      call void @_ZdlPv(i8* [[NEW]])
+    return new A(B(), B());
+  }
+  A *f() {
+    return new A(makeB().x);
+  }
+  A *g() {
+    return new A(makeB());
+  }
+  A *h() {
+    return new A(makeB(), makeB());
   }
-  f();
+
+  A *i() {
+    // CHECK:    define [[A:%.*]]* @_ZN5test11iEv()
+    // CHECK:      [[X:%.*]] = alloca [[A]]*, align 8
+    // CHECK:      [[ACTIVE:%.*]] = alloca i1
+    // CHECK:      store i1 true, i1* [[ACTIVE]] 
+    // CHECK-NEXT: [[NEW:%.*]] = call noalias i8* @_Znwm(i64 8)
+    // CHECK-NEXT: [[CAST:%.*]] = bitcast i8* [[NEW]] to [[A]]*
+    // CHECK-NEXT: invoke void @_ZN5test15makeBEv([[B:%.*]]* sret [[T0:%.*]])
+    // CHECK:      [[T1:%.*]] = invoke i32 @_ZN5test11BcviEv([[B]]* [[T0]])
+    // CHECK:      invoke void @_ZN5test11AC1Ei([[A]]* [[CAST]], i32 [[T1]])
+    // CHECK:      store i1 false, i1* [[ACTIVE]]
+    // CHECK-NEXT: store [[A]]* [[CAST]], [[A]]** [[X]], align 8
+    // CHECK:      invoke void @_ZN5test15makeBEv([[B:%.*]]* sret [[T2:%.*]])
+    // CHECK:      [[RET:%.*]] = load [[A]]** [[X]], align 8
+    // CHECK:      invoke void @_ZN5test11BD1Ev([[B]]* [[T2]])
+    // CHECK:      invoke void @_ZN5test11BD1Ev([[B]]* [[T0]])
+    // CHECK:      ret [[A]]* [[RET]]
+    // CHECK:      [[ISACTIVE:%.*]] = load i1* [[ACTIVE]]
+    // CHECK-NEXT: br i1 [[ISACTIVE]]
+    // CHECK:      call void @_ZdlPv(i8* [[NEW]])
+    A *x;
+    return (x = new A(makeB()), makeB(), x);
+  }
+}
+
+namespace test2 {
+  struct A {
+    A(int); A(int, int); ~A();
+    void *p;
+    void *operator new(size_t);
+    void operator delete(void*, size_t);
+  };
+
+  A *a() {
+    // CHECK:    define [[A:%.*]]* @_ZN5test21aEv()
+    // CHECK:      [[NEW:%.*]] = call i8* @_ZN5test21AnwEm(i64 8)
+    // CHECK-NEXT: [[CAST:%.*]] = bitcast i8* [[NEW]] to [[A]]*
+    // CHECK-NEXT: invoke void @_ZN5test21AC1Ei([[A]]* [[CAST]], i32 5)
+    // CHECK:      ret [[A]]* [[CAST]]
+    // CHECK:      invoke void @_ZN5test21AdlEPvm(i8* [[NEW]], i64 8)
+    // CHECK:      call void @_ZSt9terminatev()
+    return new A(5);
+  }
+}
+
+namespace test3 {
+  struct A {
+    A(int); A(int, int); ~A();
+    void *p;
+    void *operator new(size_t, void*, void*);
+    void operator delete(void*, void*, void*);
+  };
+
+  A *a() {
+    // CHECK:    define [[A:%.*]]* @_ZN5test31aEv()
+    // CHECK:      [[FOO:%.*]] = call i8* @_ZN5test33fooEv()
+    // CHECK:      [[BAR:%.*]] = call i8* @_ZN5test33barEv()
+    // CHECK:      [[NEW:%.*]] = call i8* @_ZN5test31AnwEmPvS1_(i64 8, i8* [[FOO]], i8* [[BAR]])
+    // CHECK-NEXT: [[CAST:%.*]] = bitcast i8* [[NEW]] to [[A]]*
+    // CHECK-NEXT: invoke void @_ZN5test31AC1Ei([[A]]* [[CAST]], i32 5)
+    // CHECK:      ret [[A]]* [[CAST]]
+    // CHECK:      invoke void @_ZN5test31AdlEPvS1_S1_(i8* [[NEW]], i8* [[FOO]], i8* [[BAR]])
+    // CHECK:      call void @_ZSt9terminatev()
+    extern void *foo(), *bar();
+
+    return new(foo(),bar()) A(5);
+  }
+}
+
+namespace test4 {
+  struct A {
+    A(int); A(int, int); ~A();
+    void *p;
+    void *operator new(size_t, void*, void*);
+    void operator delete(void*, size_t, void*, void*); // not a match
+  };
+
+  A *a() {
+    // CHECK:    define [[A:%.*]]* @_ZN5test41aEv()
+    // CHECK:      [[FOO:%.*]] = call i8* @_ZN5test43fooEv()
+    // CHECK-NEXT: [[BAR:%.*]] = call i8* @_ZN5test43barEv()
+    // CHECK-NEXT: [[NEW:%.*]] = call i8* @_ZN5test41AnwEmPvS1_(i64 8, i8* [[FOO]], i8* [[BAR]])
+    // CHECK-NEXT: [[CAST:%.*]] = bitcast i8* [[NEW]] to [[A]]*
+    // CHECK-NEXT: call void @_ZN5test41AC1Ei([[A]]* [[CAST]], i32 5)
+    // CHECK-NEXT: ret [[A]]* [[CAST]]
+    extern void *foo(), *bar();
+
+    return new(foo(),bar()) A(5);
+  }
+
 }
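
Note that test4 exercises the case where no deallocation function matches
the placement allocation function: the standard says the storage is simply
not freed when initialization throws, so no cleanup is pushed and the
constructor is emitted as a plain call rather than an invoke.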
