[clang] 47b5917 - [CIR] Add support for normal cleanups (#149948)
via cfe-commits
cfe-commits at lists.llvm.org
Thu Jul 24 11:38:46 PDT 2025
Author: Andy Kaylor
Date: 2025-07-24T11:38:42-07:00
New Revision: 47b5917348332ca5d51eb893abe53e42cabb8b1d
URL: https://github.com/llvm/llvm-project/commit/47b5917348332ca5d51eb893abe53e42cabb8b1d
DIFF: https://github.com/llvm/llvm-project/commit/47b5917348332ca5d51eb893abe53e42cabb8b1d.diff
LOG: [CIR] Add support for normal cleanups (#149948)
This change adds basic handling for normal cleanups. This is a very
minimal implementation. In particular, it uses a naive substitute for the
rich cleanup and EH stack handling that is present in classic codegen
and the CIR incubator. This is intended as a temporary implementation to
allow incremental progress. It is not expected to scale well enough to
be used in a production environment. It will be replaced with the full
EHScopeStack handling when such an implementation is needed.
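As a concrete illustration of what "normal cleanup" means here (this mirrors the new cleanup.cpp test added below), consider a local variable with a non-trivial destructor:

  struct Struk {
    ~Struk(); // non-trivial destructor, so the variable needs a cleanup
  };

  void test_cleanup() {
    Struk s;
    // When control leaves this scope normally, CIRGen now emits
    // cir.call @_ZN5StrukD1Ev(%s_addr) before the cir.return.
  }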
Added:
clang/lib/CIR/CodeGen/CIRGenCleanup.cpp
clang/lib/CIR/CodeGen/EHScopeStack.h
clang/test/CIR/CodeGen/cleanup.cpp
Modified:
clang/include/clang/CIR/MissingFeatures.h
clang/lib/CIR/CodeGen/CIRGenClass.cpp
clang/lib/CIR/CodeGen/CIRGenDecl.cpp
clang/lib/CIR/CodeGen/CIRGenFunction.cpp
clang/lib/CIR/CodeGen/CIRGenFunction.h
clang/lib/CIR/CodeGen/CIRGenStmt.cpp
clang/lib/CIR/CodeGen/CMakeLists.txt
Removed:
################################################################################
diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h
index 098c2cad46f67..e1a5c3d9ca337 100644
--- a/clang/include/clang/CIR/MissingFeatures.h
+++ b/clang/include/clang/CIR/MissingFeatures.h
@@ -196,6 +196,8 @@ struct MissingFeatures {
static bool cxxRecordStaticMembers() { return false; }
static bool dataLayoutTypeAllocSize() { return false; }
static bool deferredCXXGlobalInit() { return false; }
+ static bool ehCleanupFlags() { return false; }
+ static bool ehstackBranches() { return false; }
static bool emitCheckedInBoundsGEP() { return false; }
static bool emitCondLikelihoodViaExpectIntrinsic() { return false; }
static bool emitLifetimeMarkers() { return false; }
diff --git a/clang/lib/CIR/CodeGen/CIRGenClass.cpp b/clang/lib/CIR/CodeGen/CIRGenClass.cpp
index bf42ff7738b8c..50cca0e63611e 100644
--- a/clang/lib/CIR/CodeGen/CIRGenClass.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenClass.cpp
@@ -480,6 +480,19 @@ void CIRGenFunction::emitImplicitAssignmentOperatorBody(FunctionArgList &args) {
s->getStmtClassName());
}
+void CIRGenFunction::destroyCXXObject(CIRGenFunction &cgf, Address addr,
+ QualType type) {
+ const RecordType *rtype = type->castAs<RecordType>();
+ const CXXRecordDecl *record = cast<CXXRecordDecl>(rtype->getDecl());
+ const CXXDestructorDecl *dtor = record->getDestructor();
+ // TODO(cir): Unlike traditional codegen, CIRGen should actually emit trivial
+ // dtors which shall be removed on later CIR passes. However, only remove this
+ // assertion after we have a test case to exercise this path.
+ assert(!dtor->isTrivial());
+ cgf.emitCXXDestructorCall(dtor, Dtor_Complete, /*forVirtualBase*/ false,
+ /*delegating=*/false, addr, type);
+}
+
void CIRGenFunction::emitDelegatingCXXConstructorCall(
const CXXConstructorDecl *ctor, const FunctionArgList &args) {
assert(ctor->isDelegatingConstructor());
diff --git a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp
new file mode 100644
index 0000000000000..be21ce9c4a18c
--- /dev/null
+++ b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp
@@ -0,0 +1,69 @@
+//===--- CIRGenCleanup.cpp - Bookkeeping and code emission for cleanups ---===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains code dealing with the IR generation for cleanups
+// and related information.
+//
+// A "cleanup" is a piece of code which needs to be executed whenever
+// control transfers out of a particular scope. This can be
+// conditionalized to occur only on exceptional control flow, only on
+// normal control flow, or both.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CIRGenFunction.h"
+
+#include "clang/CIR/MissingFeatures.h"
+
+using namespace clang;
+using namespace clang::CIRGen;
+
+//===----------------------------------------------------------------------===//
+// CIRGenFunction cleanup related
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// EHScopeStack
+//===----------------------------------------------------------------------===//
+
+void EHScopeStack::Cleanup::anchor() {}
+
+static mlir::Block *getCurCleanupBlock(CIRGenFunction &cgf) {
+ mlir::OpBuilder::InsertionGuard guard(cgf.getBuilder());
+ mlir::Block *cleanup =
+ cgf.curLexScope->getOrCreateCleanupBlock(cgf.getBuilder());
+ return cleanup;
+}
+
+/// Pops a cleanup block. If the block includes a normal cleanup, the
+/// current insertion point is threaded through the cleanup, as are
+/// any branch fixups on the cleanup.
+void CIRGenFunction::popCleanupBlock() {
+ assert(!ehStack.cleanupStack.empty() && "cleanup stack is empty!");
+ mlir::OpBuilder::InsertionGuard guard(builder);
+ std::unique_ptr<EHScopeStack::Cleanup> cleanup =
+ ehStack.cleanupStack.pop_back_val();
+
+ assert(!cir::MissingFeatures::ehCleanupFlags());
+ mlir::Block *cleanupEntry = getCurCleanupBlock(*this);
+ builder.setInsertionPointToEnd(cleanupEntry);
+ cleanup->emit(*this);
+}
+
+/// Pops cleanup blocks until the given savepoint is reached.
+void CIRGenFunction::popCleanupBlocks(size_t oldCleanupStackDepth) {
+ assert(!cir::MissingFeatures::ehstackBranches());
+
+ assert(ehStack.getStackDepth() >= oldCleanupStackDepth);
+
+ // Pop cleanup blocks until we reach the base stack depth for the
+ // current scope.
+ while (ehStack.getStackDepth() > oldCleanupStackDepth) {
+ popCleanupBlock();
+ }
+}
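In other words, the "naive substitute" is a plain LIFO stack of owned cleanup objects: pushCleanup appends an entry, and popCleanupBlocks pops entries back down to the depth recorded when the scope was entered, emitting each one. A minimal standalone sketch of that behavior (illustrative only, not the CIRGen code itself; PrintCleanup is invented for the example):

  #include <cstdio>
  #include <memory>
  #include <vector>

  // Stand-in for EHScopeStack::Cleanup: something to run when a scope exits.
  struct Cleanup {
    virtual ~Cleanup() = default;
    virtual void emit() = 0;
  };

  struct PrintCleanup final : Cleanup {
    const char *name;
    explicit PrintCleanup(const char *n) : name(n) {}
    void emit() override { std::printf("destroy %s\n", name); }
  };

  int main() {
    std::vector<std::unique_ptr<Cleanup>> stack; // the simplified cleanup stack
    size_t scopeDepth = stack.size();            // recorded at scope entry

    stack.push_back(std::make_unique<PrintCleanup>("outer"));
    stack.push_back(std::make_unique<PrintCleanup>("inner"));

    // Like popCleanupBlocks: pop back down to the recorded depth in LIFO
    // order, so "inner" is destroyed before "outer".
    while (stack.size() > scopeDepth) {
      stack.back()->emit();
      stack.pop_back();
    }
  }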
diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp
index afbe92aded804..a28ac3c16ce59 100644
--- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp
@@ -183,8 +183,8 @@ void CIRGenFunction::emitAutoVarCleanups(
const VarDecl &d = *emission.Variable;
// Check the type for a cleanup.
- if (d.needsDestruction(getContext()))
- cgm.errorNYI(d.getSourceRange(), "emitAutoVarCleanups: type cleanup");
+ if (QualType::DestructionKind dtorKind = d.needsDestruction(getContext()))
+ emitAutoVarTypeCleanup(emission, dtorKind);
assert(!cir::MissingFeatures::opAllocaPreciseLifetime());
@@ -648,3 +648,96 @@ void CIRGenFunction::emitNullabilityCheck(LValue lhs, mlir::Value rhs,
assert(!cir::MissingFeatures::sanitizers());
}
+
+/// Immediately perform the destruction of the given object.
+///
+/// \param addr - the address of the object; a type*
+/// \param type - the type of the object; if an array type, all
+/// objects are destroyed in reverse order
+/// \param destroyer - the function to call to destroy individual
+/// elements
+void CIRGenFunction::emitDestroy(Address addr, QualType type,
+ Destroyer *destroyer) {
+ if (getContext().getAsArrayType(type))
+ cgm.errorNYI("emitDestroy: array type");
+
+ return destroyer(*this, addr, type);
+}
+
+CIRGenFunction::Destroyer *
+CIRGenFunction::getDestroyer(QualType::DestructionKind kind) {
+ switch (kind) {
+ case QualType::DK_none:
+ llvm_unreachable("no destroyer for trivial dtor");
+ case QualType::DK_cxx_destructor:
+ return destroyCXXObject;
+ case QualType::DK_objc_strong_lifetime:
+ case QualType::DK_objc_weak_lifetime:
+ case QualType::DK_nontrivial_c_struct:
+ cgm.errorNYI("getDestroyer: other destruction kind");
+ return nullptr;
+ }
+ llvm_unreachable("Unknown DestructionKind");
+}
+
+namespace {
+struct DestroyObject final : EHScopeStack::Cleanup {
+ DestroyObject(Address addr, QualType type,
+ CIRGenFunction::Destroyer *destroyer)
+ : addr(addr), type(type), destroyer(destroyer) {}
+
+ Address addr;
+ QualType type;
+ CIRGenFunction::Destroyer *destroyer;
+
+ void emit(CIRGenFunction &cgf) override {
+ cgf.emitDestroy(addr, type, destroyer);
+ }
+};
+} // namespace
+
+/// Enter a destroy cleanup for the given local variable.
+void CIRGenFunction::emitAutoVarTypeCleanup(
+ const CIRGenFunction::AutoVarEmission &emission,
+ QualType::DestructionKind dtorKind) {
+ assert(dtorKind != QualType::DK_none);
+
+ // Note that for __block variables, we want to destroy the
+ // original stack object, not the possibly forwarded object.
+ Address addr = emission.getObjectAddress(*this);
+
+ const VarDecl *var = emission.Variable;
+ QualType type = var->getType();
+
+ CleanupKind cleanupKind = NormalAndEHCleanup;
+ CIRGenFunction::Destroyer *destroyer = nullptr;
+
+ switch (dtorKind) {
+ case QualType::DK_none:
+ llvm_unreachable("no cleanup for trivially-destructible variable");
+
+ case QualType::DK_cxx_destructor:
+ // If there's an NRVO flag on the emission, we need a different
+ // cleanup.
+ if (emission.NRVOFlag) {
+ cgm.errorNYI(var->getSourceRange(), "emitAutoVarTypeCleanup: NRVO");
+ return;
+ }
+ // Otherwise, this is handled below.
+ break;
+
+ case QualType::DK_objc_strong_lifetime:
+ case QualType::DK_objc_weak_lifetime:
+ case QualType::DK_nontrivial_c_struct:
+ cgm.errorNYI(var->getSourceRange(),
+ "emitAutoVarTypeCleanup: other dtor kind");
+ return;
+ }
+
+ // If we haven't chosen a more specific destroyer, use the default.
+ if (!destroyer)
+ destroyer = getDestroyer(dtorKind);
+
+ assert(!cir::MissingFeatures::ehCleanupFlags());
+ ehStack.pushCleanup<DestroyObject>(cleanupKind, addr, type, destroyer);
+}
diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
index afdb1927f952e..b4b95d627c619 100644
--- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
@@ -26,7 +26,11 @@ namespace clang::CIRGen {
CIRGenFunction::CIRGenFunction(CIRGenModule &cgm, CIRGenBuilderTy &builder,
bool suppressNewContext)
- : CIRGenTypeCache(cgm), cgm{cgm}, builder(builder) {}
+ : CIRGenTypeCache(cgm), cgm{cgm}, builder(builder) {
+ ehStack.setCGF(this);
+ currentCleanupStackDepth = 0;
+ assert(ehStack.getStackDepth() == 0);
+}
CIRGenFunction::~CIRGenFunction() {}
@@ -227,6 +231,14 @@ void CIRGenFunction::LexicalScope::cleanup() {
CIRGenBuilderTy &builder = cgf.builder;
LexicalScope *localScope = cgf.curLexScope;
+ auto applyCleanup = [&]() {
+ if (performCleanup) {
+ // ApplyDebugLocation
+ assert(!cir::MissingFeatures::generateDebugInfo());
+ forceCleanup();
+ }
+ };
+
if (returnBlock != nullptr) {
// Write out the return block, which loads the value from `__retval` and
// issues the `cir.return`.
@@ -235,32 +247,42 @@ void CIRGenFunction::LexicalScope::cleanup() {
(void)emitReturn(*returnLoc);
}
- mlir::Block *curBlock = builder.getBlock();
- if (isGlobalInit() && !curBlock)
- return;
- if (curBlock->mightHaveTerminator() && curBlock->getTerminator())
- return;
-
- // Get rid of any empty block at the end of the scope.
- bool entryBlock = builder.getInsertionBlock()->isEntryBlock();
- if (!entryBlock && curBlock->empty()) {
- curBlock->erase();
- if (returnBlock != nullptr && returnBlock->getUses().empty())
- returnBlock->erase();
- return;
- }
-
- // Reached the end of the scope.
- {
+ auto insertCleanupAndLeave = [&](mlir::Block *insPt) {
mlir::OpBuilder::InsertionGuard guard(builder);
- builder.setInsertionPointToEnd(curBlock);
+ builder.setInsertionPointToEnd(insPt);
+
+ // If we still don't have a cleanup block, it means that `applyCleanup`
+ // below might be able to get us one.
+ mlir::Block *cleanupBlock = localScope->getCleanupBlock(builder);
+
+ // Leverage and defer to RunCleanupsScope's dtor and scope handling.
+ applyCleanup();
+
+ // If we now have one after `applyCleanup`, hook it up properly.
+ if (!cleanupBlock && localScope->getCleanupBlock(builder)) {
+ cleanupBlock = localScope->getCleanupBlock(builder);
+ builder.create<cir::BrOp>(insPt->back().getLoc(), cleanupBlock);
+ if (!cleanupBlock->mightHaveTerminator()) {
+ mlir::OpBuilder::InsertionGuard guard(builder);
+ builder.setInsertionPointToEnd(cleanupBlock);
+ builder.create<cir::YieldOp>(localScope->endLoc);
+ }
+ }
if (localScope->depth == 0) {
// Reached the end of the function.
if (returnBlock != nullptr) {
- if (returnBlock->getUses().empty())
+ if (returnBlock->getUses().empty()) {
returnBlock->erase();
- else {
+ } else {
+ // Thread return block via cleanup block.
+ if (cleanupBlock) {
+ for (mlir::BlockOperand &blockUse : returnBlock->getUses()) {
+ cir::BrOp brOp = mlir::cast<cir::BrOp>(blockUse.getOwner());
+ brOp.setSuccessor(cleanupBlock);
+ }
+ }
+
builder.create<cir::BrOp>(*returnLoc, returnBlock);
return;
}
@@ -268,13 +290,50 @@ void CIRGenFunction::LexicalScope::cleanup() {
emitImplicitReturn();
return;
}
- // Reached the end of a non-function scope. Some scopes, such as those
- // used with the ?: operator, can return a value.
- if (!localScope->isTernary() && !curBlock->mightHaveTerminator()) {
+
+ // End of any local scope other than the function scope.
+ // Ternary ops have to deal with matching arms for yielding types and do
+ // return a value, so they must do their own cir.yield insertion.
+ if (!localScope->isTernary() && !insPt->mightHaveTerminator()) {
!retVal ? builder.create<cir::YieldOp>(localScope->endLoc)
: builder.create<cir::YieldOp>(localScope->endLoc, retVal);
}
+ };
+
+ // If a cleanup block has been created at some point, branch to it
+ // and set the insertion point to continue at the cleanup block.
+ // Terminators are then inserted either in the cleanup block or
+ // inline in this current block.
+ mlir::Block *cleanupBlock = localScope->getCleanupBlock(builder);
+ if (cleanupBlock)
+ insertCleanupAndLeave(cleanupBlock);
+
+ // Now deal with any pending block wrap up like implicit end of
+ // scope.
+
+ mlir::Block *curBlock = builder.getBlock();
+ if (isGlobalInit() && !curBlock)
+ return;
+ if (curBlock->mightHaveTerminator() && curBlock->getTerminator())
+ return;
+
+ // Get rid of any empty block at the end of the scope.
+ bool entryBlock = builder.getInsertionBlock()->isEntryBlock();
+ if (!entryBlock && curBlock->empty()) {
+ curBlock->erase();
+ if (returnBlock != nullptr && returnBlock->getUses().empty())
+ returnBlock->erase();
+ return;
}
+
+ // If there's a cleanup block, branch to it, nothing else to do.
+ if (cleanupBlock) {
+ builder.create<cir::BrOp>(curBlock->back().getLoc(), cleanupBlock);
+ return;
+ }
+
+ // No pre-existent cleanup block, emit cleanup code and yield/return.
+ insertCleanupAndLeave(curBlock);
}
cir::ReturnOp CIRGenFunction::LexicalScope::emitReturn(mlir::Location loc) {
@@ -408,7 +467,19 @@ void CIRGenFunction::startFunction(GlobalDecl gd, QualType returnType,
}
}
-void CIRGenFunction::finishFunction(SourceLocation endLoc) {}
+void CIRGenFunction::finishFunction(SourceLocation endLoc) {
+ // Pop any cleanups that might have been associated with the
+ // parameters. Do this in whatever block we're currently in; it's
+ // important to do this before we enter the return block or return
+ // edges will be *really* confused.
+ // TODO(cir): Use prologueCleanupDepth here.
+ bool hasCleanups = ehStack.getStackDepth() != currentCleanupStackDepth;
+ if (hasCleanups) {
+ assert(!cir::MissingFeatures::generateDebugInfo());
+ // FIXME(cir): should we clearInsertionPoint? breaks many testcases
+ popCleanupBlocks(currentCleanupStackDepth);
+ }
+}
mlir::LogicalResult CIRGenFunction::emitFunctionBody(const clang::Stmt *body) {
auto result = mlir::LogicalResult::success();
diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h
index 83e7f63773c3c..4891c7496588f 100644
--- a/clang/lib/CIR/CodeGen/CIRGenFunction.h
+++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h
@@ -18,6 +18,7 @@
#include "CIRGenModule.h"
#include "CIRGenTypeCache.h"
#include "CIRGenValue.h"
+#include "EHScopeStack.h"
#include "Address.h"
@@ -61,6 +62,9 @@ class CIRGenFunction : public CIRGenTypeCache {
/// The compiler-generated variable that holds the return value.
std::optional<mlir::Value> fnRetAlloca;
+ /// Tracks function scope overall cleanup handling.
+ EHScopeStack ehStack;
+
/// CXXThisDecl - When generating code for a C++ member function,
/// this will hold the implicit 'this' declaration.
ImplicitParamDecl *cxxabiThisDecl = nullptr;
@@ -595,14 +599,65 @@ class CIRGenFunction : public CIRGenTypeCache {
FunctionArgList args, clang::SourceLocation loc,
clang::SourceLocation startLoc);
+ /// Takes the old cleanup stack size and emits the cleanup blocks
+ /// that have been added.
+ void popCleanupBlocks(size_t oldCleanupStackDepth);
+ void popCleanupBlock();
+
+ /// Enters a new scope for capturing cleanups, all of which
+ /// will be executed once the scope is exited.
+ class RunCleanupsScope {
+ size_t cleanupStackDepth, oldCleanupStackDepth;
+
+ protected:
+ bool performCleanup;
+
+ private:
+ RunCleanupsScope(const RunCleanupsScope &) = delete;
+ void operator=(const RunCleanupsScope &) = delete;
+
+ protected:
+ CIRGenFunction &cgf;
+
+ /// Enter a new cleanup scope.
+ explicit RunCleanupsScope(CIRGenFunction &cgf)
+ : performCleanup(true), cgf(cgf) {
+ cleanupStackDepth = cgf.ehStack.getStackDepth();
+ oldCleanupStackDepth = cgf.currentCleanupStackDepth;
+ cgf.currentCleanupStackDepth = cleanupStackDepth;
+ }
+
+ /// Exit this cleanup scope, emitting any accumulated cleanups.
+ ~RunCleanupsScope() {
+ if (performCleanup)
+ forceCleanup();
+ }
+
+ /// Force the emission of cleanups now, instead of waiting
+ /// until this object is destroyed.
+ void forceCleanup() {
+ assert(performCleanup && "Already forced cleanup");
+ {
+ mlir::OpBuilder::InsertionGuard guard(cgf.getBuilder());
+ cgf.popCleanupBlocks(cleanupStackDepth);
+ performCleanup = false;
+ cgf.currentCleanupStackDepth = oldCleanupStackDepth;
+ }
+ }
+ };
+
+ // Cleanup stack depth of the RunCleanupsScope that was pushed most recently.
+ size_t currentCleanupStackDepth;
+
+public:
/// Represents a scope, including function bodies, compound statements, and
/// the substatements of if/while/do/for/switch/try statements. This class
/// handles any automatic cleanup, along with the return value.
- struct LexicalScope {
+ struct LexicalScope : public RunCleanupsScope {
private:
- // TODO(CIR): This will live in the base class RunCleanupScope once that
- // class is upstreamed.
- CIRGenFunction &cgf;
+ // Block containing cleanup code for things initialized in this
+ // lexical context (scope).
+ mlir::Block *cleanupBlock = nullptr;
// Points to the scope entry block. This is useful, for instance, for
// helping to insert allocas before finalizing any recursive CodeGen from
@@ -632,8 +687,8 @@ class CIRGenFunction : public CIRGenTypeCache {
unsigned depth = 0;
LexicalScope(CIRGenFunction &cgf, mlir::Location loc, mlir::Block *eb)
- : cgf(cgf), entryBlock(eb), parentScope(cgf.curLexScope), beginLoc(loc),
- endLoc(loc) {
+ : RunCleanupsScope(cgf), entryBlock(eb), parentScope(cgf.curLexScope),
+ beginLoc(loc), endLoc(loc) {
assert(entryBlock && "LexicalScope requires an entry block");
cgf.curLexScope = this;
@@ -671,6 +726,27 @@ class CIRGenFunction : public CIRGenTypeCache {
void setAsSwitch() { scopeKind = Kind::Switch; }
void setAsTernary() { scopeKind = Kind::Ternary; }
+ // Lazy create cleanup block or return what's available.
+ mlir::Block *getOrCreateCleanupBlock(mlir::OpBuilder &builder) {
+ if (cleanupBlock)
+ return cleanupBlock;
+ cleanupBlock = createCleanupBlock(builder);
+ return cleanupBlock;
+ }
+
+ mlir::Block *getCleanupBlock(mlir::OpBuilder &builder) {
+ return cleanupBlock;
+ }
+
+ mlir::Block *createCleanupBlock(mlir::OpBuilder &builder) {
+ // Create the cleanup block but don't hook it up just yet.
+ mlir::OpBuilder::InsertionGuard guard(builder);
+ mlir::Region *r = builder.getBlock() ? builder.getBlock()->getParent()
+ : &cgf.curFn->getRegion(0);
+ cleanupBlock = builder.createBlock(r);
+ return cleanupBlock;
+ }
+
// ---
// Return handling.
// ---
@@ -721,6 +797,12 @@ class CIRGenFunction : public CIRGenTypeCache {
LexicalScope *curLexScope = nullptr;
+ typedef void Destroyer(CIRGenFunction &cgf, Address addr, QualType ty);
+
+ static Destroyer destroyCXXObject;
+
+ Destroyer *getDestroyer(clang::QualType::DestructionKind kind);
+
/// ----------------------
/// CIR emit functions
/// ----------------------
@@ -781,6 +863,8 @@ class CIRGenFunction : public CIRGenTypeCache {
void emitAutoVarCleanups(const AutoVarEmission &emission);
void emitAutoVarInit(const AutoVarEmission &emission);
+ void emitAutoVarTypeCleanup(const AutoVarEmission &emission,
+ clang::QualType::DestructionKind dtorKind);
void emitBaseInitializer(mlir::Location loc, const CXXRecordDecl *classDecl,
CXXCtorInitializer *baseInit);
@@ -838,6 +922,9 @@ class CIRGenFunction : public CIRGenTypeCache {
LValue emitCompoundLiteralLValue(const CompoundLiteralExpr *e);
void emitConstructorBody(FunctionArgList &args);
+
+ void emitDestroy(Address addr, QualType type, Destroyer *destroyer);
+
void emitDestructorBody(FunctionArgList &args);
mlir::LogicalResult emitContinueStmt(const clang::ContinueStmt &s);
diff --git a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp
index 9193f6f1cd996..21bee3312eb0f 100644
--- a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp
@@ -409,7 +409,10 @@ mlir::LogicalResult CIRGenFunction::emitReturnStmt(const ReturnStmt &s) {
}
auto *retBlock = curLexScope->getOrCreateRetBlock(*this, loc);
+ // This should emit a branch through the cleanup block if one exists.
builder.create<cir::BrOp>(loc, retBlock);
+ if (ehStack.getStackDepth() != currentCleanupStackDepth)
+ cgm.errorNYI(s.getSourceRange(), "return with cleanup stack");
builder.createBlock(builder.getBlock()->getParent());
return mlir::success();
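A return that has to run pending cleanups is still diagnosed as not yet implemented. A guess at the kind of input that would currently hit this path (an explicit return while a destructor cleanup is pending in the same scope):

  struct Struk {
    ~Struk();
  };

  void early_return() {
    Struk s;
    return; // explicit return with the cleanup for 's' still pending
  }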
diff --git a/clang/lib/CIR/CodeGen/CMakeLists.txt b/clang/lib/CIR/CodeGen/CMakeLists.txt
index 03ea60c76c87d..ca3a329d0c56d 100644
--- a/clang/lib/CIR/CodeGen/CMakeLists.txt
+++ b/clang/lib/CIR/CodeGen/CMakeLists.txt
@@ -11,6 +11,7 @@ add_clang_library(clangCIR
CIRGenBuilder.cpp
CIRGenCall.cpp
CIRGenClass.cpp
+ CIRGenCleanup.cpp
CIRGenCXX.cpp
CIRGenCXXABI.cpp
CIRGenCXXExpr.cpp
diff --git a/clang/lib/CIR/CodeGen/EHScopeStack.h b/clang/lib/CIR/CodeGen/EHScopeStack.h
new file mode 100644
index 0000000000000..22750aca3c4fc
--- /dev/null
+++ b/clang/lib/CIR/CodeGen/EHScopeStack.h
@@ -0,0 +1,99 @@
+//===-- EHScopeStack.h - Stack for cleanup CIR generation -------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// These classes should be the minimum interface required for other parts of
+// CIR CodeGen to emit cleanups. The implementation is in CIRGenCleanup.cpp and
+// other implementation details that are not widely needed are in
+// CIRGenCleanup.h.
+//
+// TODO(cir): this header should be shared between LLVM and CIR codegen.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_LIB_CIR_CODEGEN_EHSCOPESTACK_H
+#define CLANG_LIB_CIR_CODEGEN_EHSCOPESTACK_H
+
+#include "llvm/ADT/SmallVector.h"
+
+namespace clang::CIRGen {
+
+class CIRGenFunction;
+
+enum CleanupKind : unsigned {
+ /// Denotes a cleanup that should run when a scope is exited using exceptional
+ /// control flow (e.g., a throw statement leading to stack unwinding).
+ EHCleanup = 0x1,
+
+ /// Denotes a cleanup that should run when a scope is exited using normal
+ /// control flow (falling off the end of the scope, return, goto, ...).
+ NormalCleanup = 0x2,
+
+ NormalAndEHCleanup = EHCleanup | NormalCleanup,
+
+ LifetimeMarker = 0x8,
+ NormalEHLifetimeMarker = LifetimeMarker | NormalAndEHCleanup,
+};
+
+/// A stack of scopes which respond to exceptions, including cleanups
+/// and catch blocks.
+class EHScopeStack {
+public:
+ /// Information for lazily generating a cleanup. Subclasses must be
+ /// POD-like: cleanups will not be destructed, and they will be
+ /// allocated on the cleanup stack and freely copied and moved
+ /// around.
+ ///
+ /// Cleanup implementations should generally be declared in an
+ /// anonymous namespace.
+ class Cleanup {
+ // Anchor the construction vtable.
+ virtual void anchor();
+
+ public:
+ Cleanup(const Cleanup &) = default;
+ Cleanup(Cleanup &&) {}
+ Cleanup() = default;
+
+ virtual ~Cleanup() = default;
+
+ /// Emit the cleanup. For normal cleanups, this is run in the
+ /// same EH context as when the cleanup was pushed, i.e. the
+ /// immediately-enclosing context of the cleanup scope. For
+ /// EH cleanups, this is run in a terminate context.
+ ///
+ // \param flags cleanup kind.
+ virtual void emit(CIRGenFunction &cgf) = 0;
+ };
+
+ // Classic codegen has a finely tuned custom allocator and a complex stack
+ // management scheme. We'll probably eventually want to find a way to share
+ // that implementation. For now, we will use a very simplified implementation
+ // to get cleanups working.
+ llvm::SmallVector<std::unique_ptr<Cleanup>, 8> cleanupStack;
+
+private:
+ /// The CGF this Stack belongs to.
+ CIRGenFunction *cgf = nullptr;
+
+public:
+ EHScopeStack() = default;
+ ~EHScopeStack() = default;
+
+ /// Push a lazily-created cleanup on the stack.
+ template <class T, class... As> void pushCleanup(CleanupKind kind, As... a) {
+ cleanupStack.push_back(std::make_unique<T>(a...));
+ }
+
+ void setCGF(CIRGenFunction *inCGF) { cgf = inCGF; }
+
+ size_t getStackDepth() const { return cleanupStack.size(); }
+};
+
+} // namespace clang::CIRGen
+
+#endif // CLANG_LIB_CIR_CODEGEN_EHSCOPESTACK_H
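The intended pattern for adding further cleanups is the one DestroyObject follows in CIRGenDecl.cpp: subclass EHScopeStack::Cleanup and register it with pushCleanup. A hypothetical sketch (FlushBuffer and emitFlushCall are invented for illustration; only the EHScopeStack pieces come from this patch):

  namespace {
  // Hypothetical cleanup, shown only to illustrate the Cleanup/pushCleanup
  // pattern; it is not part of this commit.
  struct FlushBuffer final : EHScopeStack::Cleanup {
    Address buffer;
    explicit FlushBuffer(Address buffer) : buffer(buffer) {}

    void emit(CIRGenFunction &cgf) override {
      // Emit whatever CIR should run when the scope is left, e.g. a call to
      // some hypothetical helper on cgf.
      cgf.emitFlushCall(buffer);
    }
  };
  } // namespace

  // At the point where the object needing the cleanup is created:
  //   ehStack.pushCleanup<FlushBuffer>(NormalAndEHCleanup, bufferAddr);
  // The cleanup is later emitted by popCleanupBlock() when the enclosing
  // RunCleanupsScope (or LexicalScope) is exited.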
diff --git a/clang/test/CIR/CodeGen/cleanup.cpp b/clang/test/CIR/CodeGen/cleanup.cpp
new file mode 100644
index 0000000000000..4196151352b79
--- /dev/null
+++ b/clang/test/CIR/CodeGen/cleanup.cpp
@@ -0,0 +1,83 @@
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir
+// RUN: FileCheck --input-file=%t.cir %s
+
+struct Struk {
+ ~Struk();
+};
+
+// CHECK: !rec_Struk = !cir.record<struct "Struk" padded {!u8i}>
+
+// CHECK: cir.func{{.*}} @_ZN5StrukD1Ev(!cir.ptr<!rec_Struk>)
+
+void test_cleanup() {
+ Struk s;
+}
+
+// CHECK: cir.func{{.*}} @_Z12test_cleanupv()
+// CHECK: %[[S_ADDR:.*]] = cir.alloca !rec_Struk, !cir.ptr<!rec_Struk>, ["s"]
+// CHECK: cir.call @_ZN5StrukD1Ev(%[[S_ADDR]]) nothrow : (!cir.ptr<!rec_Struk>) -> ()
+// CHECK: cir.return
+
+void test_cleanup_ifelse(bool b) {
+ if (b) {
+ Struk s;
+ } else {
+ Struk s;
+ }
+}
+
+// CHECK: cir.func{{.*}} @_Z19test_cleanup_ifelseb(%arg0: !cir.bool
+// CHECK: cir.scope {
+// CHECK: %[[B:.*]] = cir.load{{.*}} %0 : !cir.ptr<!cir.bool>
+// CHECK: cir.if %[[B]] {
+// CHECK: %[[S:.*]] = cir.alloca !rec_Struk, !cir.ptr<!rec_Struk>, ["s"]
+// CHECK: cir.call @_ZN5StrukD1Ev(%[[S]]) nothrow : (!cir.ptr<!rec_Struk>) -> ()
+// CHECK: } else {
+// CHECK: %[[S_TOO:.*]] = cir.alloca !rec_Struk, !cir.ptr<!rec_Struk>, ["s"]
+// CHECK: cir.call @_ZN5StrukD1Ev(%[[S_TOO]]) nothrow : (!cir.ptr<!rec_Struk>) -> ()
+// CHECK: }
+// CHECK: }
+// CHECK: cir.return
+
+void test_cleanup_for() {
+ for (int i = 0; i < 10; i++) {
+ Struk s;
+ }
+}
+
+// CHECK: cir.func{{.*}} @_Z16test_cleanup_forv()
+// CHECK: cir.scope {
+// CHECK: cir.for : cond {
+// CHECK: } body {
+// CHECK: cir.scope {
+// CHECK: %[[S:.*]] = cir.alloca !rec_Struk, !cir.ptr<!rec_Struk>, ["s"]
+// CHECK: cir.call @_ZN5StrukD1Ev(%[[S]]) nothrow : (!cir.ptr<!rec_Struk>) -> ()
+// CHECK: }
+// CHECK: cir.yield
+// CHECK: } step {
+// CHECK: }
+// CHECK: }
+// CHECK: cir.return
+
+void test_cleanup_nested() {
+ Struk outer;
+ {
+ Struk middle;
+ {
+ Struk inner;
+ }
+ }
+}
+
+// CHECK: cir.func{{.*}} @_Z19test_cleanup_nestedv()
+// CHECK: %[[OUTER:.*]] = cir.alloca !rec_Struk, !cir.ptr<!rec_Struk>, ["outer"]
+// CHECK: cir.scope {
+// CHECK: %[[MIDDLE:.*]] = cir.alloca !rec_Struk, !cir.ptr<!rec_Struk>, ["middle"]
+// CHECK: cir.scope {
+// CHECK: %[[INNER:.*]] = cir.alloca !rec_Struk, !cir.ptr<!rec_Struk>, ["inner"]
+// CHECK: cir.call @_ZN5StrukD1Ev(%[[INNER]]) nothrow : (!cir.ptr<!rec_Struk>) -> ()
+// CHECK: }
+// CHECK: cir.call @_ZN5StrukD1Ev(%[[MIDDLE]]) nothrow : (!cir.ptr<!rec_Struk>) -> ()
+// CHECK: }
+// CHECK: cir.call @_ZN5StrukD1Ev(%[[OUTER]]) nothrow : (!cir.ptr<!rec_Struk>) -> ()
+// CHECK: cir.return