[clang] 6bd8820 - [CIR] Add support for delete cleanup after new operators (#184707)
via cfe-commits
cfe-commits at lists.llvm.org
Thu Mar 5 13:13:47 PST 2026
Author: Andy Kaylor
Date: 2026-03-05T21:13:42Z
New Revision: 6bd8820704d4d41fb3e41ec7032ce8ef2330bf91
URL: https://github.com/llvm/llvm-project/commit/6bd8820704d4d41fb3e41ec7032ce8ef2330bf91
DIFF: https://github.com/llvm/llvm-project/commit/6bd8820704d4d41fb3e41ec7032ce8ef2330bf91.diff
LOG: [CIR] Add support for delete cleanup after new operators (#184707)
This adds support for calling operator delete when an exception is
thrown during initialization following an operator new call.
This does not yet handle the case where a temporary object is
materialized during the object initialization. That case is marked by
the "setupCleanupBlockActivation" diagnostic in deactivateCleanupBlock
and will be implemented in a future change.
Added:
clang/test/CIR/CodeGen/new-delete.cpp
Modified:
clang/include/clang/CIR/MissingFeatures.h
clang/lib/CIR/CodeGen/CIRGenCall.cpp
clang/lib/CIR/CodeGen/CIRGenCall.h
clang/lib/CIR/CodeGen/CIRGenCleanup.cpp
clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp
clang/lib/CIR/CodeGen/CIRGenFunction.h
clang/lib/CIR/CodeGen/EHScopeStack.h
Removed:
################################################################################
diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h
index d206503d914f5..1e3a2c9af35d1 100644
--- a/clang/include/clang/CIR/MissingFeatures.h
+++ b/clang/include/clang/CIR/MissingFeatures.h
@@ -291,7 +291,6 @@ struct MissingFeatures {
static bool handleBuiltinICEArguments() { return false; }
static bool hip() { return false; }
static bool incrementProfileCounter() { return false; }
- static bool innermostEHScope() { return false; }
static bool insertBuiltinUnpredictable() { return false; }
static bool instrumentation() { return false; }
static bool intrinsicElementTypeSupport() { return false; }
@@ -348,6 +347,7 @@ struct MissingFeatures {
static bool targetCodeGenInfoGetNullPointer() { return false; }
static bool thunks() { return false; }
static bool tryEmitAsConstant() { return false; }
+ static bool typeAwareAllocation() { return false; }
static bool typeChecks() { return false; }
static bool useEHCleanupForArray() { return false; }
static bool vaArgABILowering() { return false; }
diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.cpp b/clang/lib/CIR/CodeGen/CIRGenCall.cpp
index 157dc3fdd56fb..61ccd85cd6342 100644
--- a/clang/lib/CIR/CodeGen/CIRGenCall.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenCall.cpp
@@ -1021,6 +1021,16 @@ CIRGenTypes::arrangeFunctionDeclaration(const FunctionDecl *fd) {
return arrangeFreeFunctionType(funcTy.castAs<FunctionProtoType>());
}
+RValue CallArg::getRValue(CIRGenFunction &cgf, mlir::Location loc) const {
+ if (!hasLV)
+ return rv;
+ LValue copy = cgf.makeAddrLValue(cgf.createMemTemp(ty, loc), ty);
+ cgf.emitAggregateCopy(copy, lv, ty, AggValueSlot::DoesNotOverlap,
+ lv.isVolatile());
+ isUsed = true;
+ return RValue::getAggregate(copy.getAddress());
+}
+
static cir::CIRCallOpInterface
emitCallLikeOp(CIRGenFunction &cgf, mlir::Location callLoc,
cir::FuncType indirectFuncTy, mlir::Value indirectFuncVal,
diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.h b/clang/lib/CIR/CodeGen/CIRGenCall.h
index 347bd4a7c8266..b30b4969ca45e 100644
--- a/clang/lib/CIR/CodeGen/CIRGenCall.h
+++ b/clang/lib/CIR/CodeGen/CIRGenCall.h
@@ -202,7 +202,7 @@ struct CallArg {
/// A data-flow flag to make sure getRValue and/or copyInto are not
/// called twice for duplicated IR emission.
- [[maybe_unused]] mutable bool isUsed;
+ mutable bool isUsed;
public:
clang::QualType ty;
@@ -215,6 +215,10 @@ struct CallArg {
bool hasLValue() const { return hasLV; }
+ /// \returns an independent RValue. If the CallArg contains an LValue,
+ /// a temporary copy is returned.
+ RValue getRValue(CIRGenFunction &cgf, mlir::Location loc) const;
+
LValue getKnownLValue() const {
assert(hasLV && !isUsed);
return lv;
diff --git a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp
index bdb2947200f23..cbed8452810c5 100644
--- a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp
@@ -95,7 +95,6 @@ void *EHScopeStack::pushCleanup(CleanupKind kind, size_t size) {
bool isLifetimeMarker = kind & LifetimeMarker;
bool skipCleanupScope = false;
- assert(!cir::MissingFeatures::innermostEHScope());
cir::CleanupKind cleanupKind = cir::CleanupKind::All;
if (isEHCleanup && cgf->getLangOpts().Exceptions) {
cleanupKind =
@@ -193,6 +192,25 @@ bool EHScopeStack::requiresCatchOrCleanup() const {
return false;
}
+/// Deactivate a cleanup that was created in an active state.
+void CIRGenFunction::deactivateCleanupBlock(EHScopeStack::stable_iterator c,
+ mlir::Operation *dominatingIP) {
+ assert(c != ehStack.stable_end() && "deactivating bottom of stack?");
+ EHCleanupScope &scope = cast<EHCleanupScope>(*ehStack.find(c));
+ assert(scope.isActive() && "double deactivation");
+
+ // If it's the top of the stack, just pop it, but do so only if it belongs
+ // to the current RunCleanupsScope.
+ if (c == ehStack.stable_begin() &&
+ currentCleanupStackDepth.strictlyEncloses(c)) {
+ popCleanupBlock();
+ return;
+ }
+
+ // Otherwise, follow the general case.
+ cgm.errorNYI("deactivateCleanupBlock: setupCleanupBlockActivation");
+}
+
static void emitCleanup(CIRGenFunction &cgf, cir::CleanupScopeOp cleanupScope,
EHScopeStack::Cleanup *cleanup,
EHScopeStack::Cleanup::Flags flags) {
@@ -245,10 +263,11 @@ void CIRGenFunction::popCleanupBlock() {
bool hasFallthrough = fallthroughSource != nullptr && isActive;
bool requiresNormalCleanup = scope.isNormalCleanup() && hasFallthrough;
+ bool requiresEHCleanup = scope.isEHCleanup() && hasFallthrough;
// If we don't need the cleanup at all, we're done.
assert(!cir::MissingFeatures::ehCleanupScopeRequiresEHCleanup());
- if (!requiresNormalCleanup) {
+ if (!requiresNormalCleanup && !requiresEHCleanup) {
ehStack.popCleanup();
return;
}
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp
index 97f496c89ab0f..35f74e7120b0b 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp
@@ -20,6 +20,7 @@
#include "clang/AST/ExprObjC.h"
#include "clang/Basic/OperatorKinds.h"
#include "clang/CIR/MissingFeatures.h"
+#include "llvm/Support/TrailingObjects.h"
using namespace clang;
using namespace clang::CIRGen;
@@ -647,6 +648,209 @@ static mlir::Value emitCXXNewAllocSize(CIRGenFunction &cgf, const CXXNewExpr *e,
return size;
}
+/// Emit a call to an operator new or operator delete function, as implicitly
+/// created by new-expressions and delete-expressions.
+static RValue emitNewDeleteCall(CIRGenFunction &cgf,
+ const FunctionDecl *calleeDecl,
+ const FunctionProtoType *calleeType,
+ const CallArgList &args) {
+ cir::CIRCallOpInterface callOrTryCall;
+ cir::FuncOp calleePtr = cgf.cgm.getAddrOfFunction(calleeDecl);
+ CIRGenCallee callee =
+ CIRGenCallee::forDirect(calleePtr, GlobalDecl(calleeDecl));
+ RValue rv =
+ cgf.emitCall(cgf.cgm.getTypes().arrangeFreeFunctionCall(args, calleeType),
+ callee, ReturnValueSlot(), args, &callOrTryCall);
+
+ /// C++1y [expr.new]p10:
+ /// [In a new-expression,] an implementation is allowed to omit a call
+ /// to a replaceable global allocation function.
+ ///
+ /// We model such elidable calls with the 'builtin' attribute.
+ assert(!cir::MissingFeatures::attributeBuiltin());
+ return rv;
+}
+
+RValue CIRGenFunction::emitNewOrDeleteBuiltinCall(const FunctionProtoType *type,
+ const CallExpr *callExpr,
+ OverloadedOperatorKind op) {
+ CallArgList args;
+ emitCallArgs(args, type, callExpr->arguments());
+ // Find the allocation or deallocation function that we're calling.
+ ASTContext &astContext = getContext();
+ assert(op == OO_New || op == OO_Delete);
+ DeclarationName name = astContext.DeclarationNames.getCXXOperatorName(op);
+
+ clang::DeclContextLookupResult lookupResult =
+ astContext.getTranslationUnitDecl()->lookup(name);
+ for (const NamedDecl *decl : lookupResult) {
+ if (const auto *funcDecl = dyn_cast<FunctionDecl>(decl)) {
+ if (astContext.hasSameType(funcDecl->getType().getTypePtr(), type)) {
+ if (sanOpts.has(SanitizerKind::AllocToken)) {
+ // TODO: Set !alloc_token metadata.
+ assert(!cir::MissingFeatures::allocToken());
+ cgm.errorNYI("Alloc token sanitizer not yet supported!");
+ }
+
+ // Emit the call to operator new/delete.
+ return emitNewDeleteCall(*this, funcDecl, type, args);
+ }
+ }
+ }
+
+ llvm_unreachable("predeclared global operator new/delete is missing");
+}
+
+namespace {
+template <typename Traits> struct PlacementArg {
+ typename Traits::RValueTy argValue;
+ QualType argType;
+};
+
+/// A cleanup to call the given 'operator delete' function upon abnormal
+/// exit from a new expression. Templated on a traits type that deals with
+/// ensuring that the arguments dominate the cleanup if necessary.
+template <typename Traits>
+class CallDeleteDuringNew final
+ : public EHScopeStack::Cleanup,
+ private llvm::TrailingObjects<CallDeleteDuringNew<Traits>,
+ PlacementArg<Traits>> {
+ using TrailingObj =
+ llvm::TrailingObjects<CallDeleteDuringNew<Traits>, PlacementArg<Traits>>;
+ friend TrailingObj;
+ using TrailingObj::getTrailingObjects;
+
+ /// Type used to hold llvm::Value*s.
+ typedef typename Traits::ValueTy ValueTy;
+ /// Type used to hold RValues.
+ typedef typename Traits::RValueTy RValueTy;
+
+ unsigned numPlacementArgs : 30;
+ LLVM_PREFERRED_TYPE(AlignedAllocationMode)
+ unsigned passAlignmentToPlacementDelete : 1;
+ const FunctionDecl *operatorDelete;
+ ValueTy ptr;
+ ValueTy allocSize;
+ CharUnits allocAlign;
+
+ PlacementArg<Traits> *getPlacementArgs() { return getTrailingObjects(); }
+
+ void setPlacementArg(unsigned i, RValueTy argValue, QualType argType) {
+ assert(i < numPlacementArgs && "index out of range");
+ getPlacementArgs()[i] = {argValue, argType};
+ }
+
+public:
+ static size_t getExtraSize(size_t numPlacementArgs) {
+ return TrailingObj::template additionalSizeToAlloc<PlacementArg<Traits>>(
+ numPlacementArgs);
+ }
+
+ CallDeleteDuringNew(size_t numPlacementArgs,
+ const FunctionDecl *operatorDelete, ValueTy ptr,
+ ValueTy allocSize,
+ const ImplicitAllocationParameters &iap,
+ CharUnits allocAlign, const CallArgList *newArgs,
+ unsigned numNonPlacementArgs, CIRGenFunction *cgf,
+ mlir::Location loc)
+ : numPlacementArgs(numPlacementArgs),
+ passAlignmentToPlacementDelete(isAlignedAllocation(iap.PassAlignment)),
+ operatorDelete(operatorDelete), ptr(ptr), allocSize(allocSize),
+ allocAlign(allocAlign) {
+ for (unsigned i = 0, n = numPlacementArgs; i != n; ++i) {
+ const CallArg &arg = (*newArgs)[i + numNonPlacementArgs];
+ setPlacementArg(i, arg.getRValue(*cgf, loc), arg.ty);
+ }
+ }
+
+ void emit(CIRGenFunction &cgf, Flags flags) override {
+ const auto *fpt = operatorDelete->getType()->castAs<FunctionProtoType>();
+ CallArgList deleteArgs;
+
+ unsigned firstNonTypeArg = 0;
+ TypeAwareAllocationMode typeAwareDeallocation = TypeAwareAllocationMode::No;
+ assert(!cir::MissingFeatures::typeAwareAllocation());
+
+ // The first argument after type-identity parameter (if any) is always
+ // a void* (or C* for a destroying operator delete for class type C).
+ deleteArgs.add(Traits::get(cgf, ptr), fpt->getParamType(firstNonTypeArg));
+
+ // Figure out what other parameters we should be implicitly passing.
+ UsualDeleteParams params;
+ if (numPlacementArgs) {
+ // A placement deallocation function is implicitly passed an alignment
+ // if the placement allocation function was, but is never passed a size.
+ params.Alignment =
+ alignedAllocationModeFromBool(passAlignmentToPlacementDelete);
+ params.TypeAwareDelete = typeAwareDeallocation;
+ params.Size = isTypeAwareAllocation(params.TypeAwareDelete);
+ } else {
+ // For a non-placement new-expression, 'operator delete' can take a
+ // size and/or an alignment if it has the right parameters.
+ params = operatorDelete->getUsualDeleteParams();
+ }
+
+ assert(!params.DestroyingDelete &&
+ "should not call destroying delete in a new-expression");
+
+ // The second argument can be a std::size_t (for non-placement delete).
+ if (params.Size)
+ deleteArgs.add(Traits::get(cgf, allocSize),
+ cgf.getContext().getSizeType());
+
+ // The next (second or third) argument can be a std::align_val_t, which
+ // is an enum whose underlying type is std::size_t.
+ // FIXME: Use the right type as the parameter type. Note that in a call
+ // to operator delete(size_t, ...), we may not have it available.
+ if (isAlignedAllocation(params.Alignment))
+ cgf.cgm.errorNYI("CallDeleteDuringNew: aligned allocation");
+
+ // Pass the rest of the arguments, which must match exactly.
+ for (unsigned i = 0; i != numPlacementArgs; ++i) {
+ auto arg = getPlacementArgs()[i];
+ deleteArgs.add(Traits::get(cgf, arg.argValue), arg.argType);
+ }
+
+ // Call 'operator delete'.
+ emitNewDeleteCall(cgf, operatorDelete, fpt, deleteArgs);
+ }
+};
+} // namespace
+
+/// Enter a cleanup to call 'operator delete' if the initializer in a
+/// new-expression throws.
+static void enterNewDeleteCleanup(CIRGenFunction &cgf, const CXXNewExpr *e,
+ Address newPtr, mlir::Value allocSize,
+ CharUnits allocAlign,
+ const CallArgList &newArgs) {
+ unsigned numNonPlacementArgs = e->getNumImplicitArgs();
+
+ // If we're not inside a conditional branch, then the cleanup will
+ // dominate and we can do the easier (and more efficient) thing.
+ if (!cgf.isInConditionalBranch()) {
+ struct DirectCleanupTraits {
+ typedef mlir::Value ValueTy;
+ typedef RValue RValueTy;
+ static RValue get(CIRGenFunction &, ValueTy v) { return RValue::get(v); }
+ static RValue get(CIRGenFunction &, RValueTy v) { return v; }
+ };
+
+ typedef CallDeleteDuringNew<DirectCleanupTraits> DirectCleanup;
+
+ assert(!cir::MissingFeatures::typeAwareAllocation());
+ cgf.ehStack.pushCleanupWithExtra<DirectCleanup>(
+ EHCleanup, e->getNumPlacementArgs(), e->getOperatorDelete(),
+ newPtr.getPointer(), allocSize, e->implicitAllocationParameters(),
+ allocAlign, &newArgs, numNonPlacementArgs, &cgf,
+ cgf.getLoc(e->getSourceRange()));
+
+ return;
+ }
+
+ cgf.cgm.errorNYI(e->getSourceRange(),
+ "enterNewDeleteCleanup: conditional branch");
+}
+
static void storeAnyExprIntoOneUnit(CIRGenFunction &cgf, const Expr *init,
QualType allocType, Address newPtr,
AggValueSlot::Overlap_t mayOverlap) {
@@ -912,59 +1116,6 @@ RValue CIRGenFunction::emitCXXPseudoDestructorExpr(
return RValue::get(nullptr);
}
-/// Emit a call to an operator new or operator delete function, as implicitly
-/// created by new-expressions and delete-expressions.
-static RValue emitNewDeleteCall(CIRGenFunction &cgf,
- const FunctionDecl *calleeDecl,
- const FunctionProtoType *calleeType,
- const CallArgList &args) {
- cir::CIRCallOpInterface callOrTryCall;
- cir::FuncOp calleePtr = cgf.cgm.getAddrOfFunction(calleeDecl);
- CIRGenCallee callee =
- CIRGenCallee::forDirect(calleePtr, GlobalDecl(calleeDecl));
- RValue rv =
- cgf.emitCall(cgf.cgm.getTypes().arrangeFreeFunctionCall(args, calleeType),
- callee, ReturnValueSlot(), args, &callOrTryCall);
-
- /// C++1y [expr.new]p10:
- /// [In a new-expression,] an implementation is allowed to omit a call
- /// to a replaceable global allocation function.
- ///
- /// We model such elidable calls with the 'builtin' attribute.
- assert(!cir::MissingFeatures::attributeBuiltin());
- return rv;
-}
-
-RValue CIRGenFunction::emitNewOrDeleteBuiltinCall(const FunctionProtoType *type,
- const CallExpr *callExpr,
- OverloadedOperatorKind op) {
- CallArgList args;
- emitCallArgs(args, type, callExpr->arguments());
- // Find the allocation or deallocation function that we're calling.
- ASTContext &astContext = getContext();
- assert(op == OO_New || op == OO_Delete);
- DeclarationName name = astContext.DeclarationNames.getCXXOperatorName(op);
-
- clang::DeclContextLookupResult lookupResult =
- astContext.getTranslationUnitDecl()->lookup(name);
- for (const auto *decl : lookupResult) {
- if (const auto *funcDecl = dyn_cast<FunctionDecl>(decl)) {
- if (astContext.hasSameType(funcDecl->getType(), QualType(type, 0))) {
- if (sanOpts.has(SanitizerKind::AllocToken)) {
- // TODO: Set !alloc_token metadata.
- assert(!cir::MissingFeatures::allocToken());
- cgm.errorNYI("Alloc token sanitizer not yet supported!");
- }
-
- // Emit the call to operator new/delete.
- return emitNewDeleteCall(*this, funcDecl, type, args);
- }
- }
- }
-
- llvm_unreachable("predeclared global operator new/delete is missing");
-}
-
namespace {
/// Calls the given 'operator delete' on a single object.
struct CallObjectDelete final : EHScopeStack::Cleanup {
@@ -1190,10 +1341,24 @@ mlir::Value CIRGenFunction::emitCXXNewExpr(const CXXNewExpr *e) {
cgm.errorNYI(e->getSourceRange(), "emitCXXNewExpr: null check");
// If there's an operator delete, enter a cleanup to call it if an
- // exception is thrown.
- if (e->getOperatorDelete() &&
- !e->getOperatorDelete()->isReservedGlobalPlacementOperator())
- cgm.errorNYI(e->getSourceRange(), "emitCXXNewExpr: operator delete");
+ // exception is thrown. If we do this, we'll be creating the result pointer
+ // inside a cleanup scope, either with a bitcast or an offset based on the
+ // array cookie size. However, we need to return that pointer from outside
+ // the cleanup scope, so we need to store it in a temporary variable.
+ bool useNewDeleteCleanup =
+ e->getOperatorDelete() &&
+ !e->getOperatorDelete()->isReservedGlobalPlacementOperator();
+ EHScopeStack::stable_iterator operatorDeleteCleanup;
+ mlir::Operation *cleanupDominator = nullptr;
+ if (useNewDeleteCleanup) {
+ assert(!cir::MissingFeatures::typeAwareAllocation());
+ enterNewDeleteCleanup(*this, e, allocation, allocSize, allocAlign,
+ allocatorArgs);
+ operatorDeleteCleanup = ehStack.stable_begin();
+ cleanupDominator =
+ cir::UnreachableOp::create(builder, getLoc(e->getSourceRange()))
+ .getOperation();
+ }
if (allocSize != allocSizeWithoutCookie) {
assert(e->isArray());
@@ -1212,6 +1377,16 @@ mlir::Value CIRGenFunction::emitCXXNewExpr(const CXXNewExpr *e) {
Address result = builder.createElementBitCast(getLoc(e->getSourceRange()),
allocation, elementTy);
+ // If we're inside a new delete cleanup, store the result pointer.
+ Address resultPtr = Address::invalid();
+ if (useNewDeleteCleanup) {
+ resultPtr =
+ createTempAlloca(builder.getPointerTo(elementTy), result.getAlignment(),
+ getLoc(e->getSourceRange()), "__new_result");
+ builder.createStore(getLoc(e->getSourceRange()), result.getPointer(),
+ resultPtr);
+ }
+
// Passing pointer through launder.invariant.group to avoid propagation of
// vptrs information which may be included in previous type.
// To not break LTO with different optimization levels, we do it regardless
@@ -1224,6 +1399,21 @@ mlir::Value CIRGenFunction::emitCXXNewExpr(const CXXNewExpr *e) {
emitNewInitializer(*this, e, allocType, elementTy, result, numElements,
allocSizeWithoutCookie);
+
+ // Deactivate the 'operator delete' cleanup if we finished
+ // initialization.
+ if (useNewDeleteCleanup) {
+ assert(operatorDeleteCleanup.isValid());
+ assert(resultPtr.isValid());
+ deactivateCleanupBlock(operatorDeleteCleanup, cleanupDominator);
+ cleanupDominator->erase();
+ cir::LoadOp loadResult =
+ builder.createLoad(getLoc(e->getSourceRange()), resultPtr);
+ result = result.withPointer(loadResult.getResult());
+ }
+
+ assert(!cir::MissingFeatures::exprNewNullCheck());
+
return result.getPointer();
}
diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h
index 539d7839d1dfe..0e82958ef6f39 100644
--- a/clang/lib/CIR/CodeGen/CIRGenFunction.h
+++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h
@@ -970,6 +970,16 @@ class CIRGenFunction : public CIRGenTypeCache {
ArrayRef<mlir::Value *> valuesToReload = {});
void popCleanupBlock();
+ /// Deactivates the given cleanup block. The block cannot be reactivated. Pops
+ /// it if it's the top of the stack.
+ ///
+/// \param dominatingIP - An instruction which is known to
+ /// dominate the current IP (if set) and which lies along
+/// all paths of execution between the current IP and the
+/// point at which the cleanup comes into scope.
+ void deactivateCleanupBlock(EHScopeStack::stable_iterator cleanup,
+ mlir::Operation *dominatingIP);
+
/// Push a cleanup to be run at the end of the current full-expression. Safe
/// against the possibility that we're currently inside a
/// conditionally-evaluated expression.
diff --git a/clang/lib/CIR/CodeGen/EHScopeStack.h b/clang/lib/CIR/CodeGen/EHScopeStack.h
index 9d614c858dbe1..09b78820a2587 100644
--- a/clang/lib/CIR/CodeGen/EHScopeStack.h
+++ b/clang/lib/CIR/CodeGen/EHScopeStack.h
@@ -187,6 +187,25 @@ class EHScopeStack {
[[maybe_unused]] Cleanup *obj = new (buffer) T(a...);
}
+ /// Push a cleanup with non-constant storage requirements on the
+ /// stack. The cleanup type must provide an additional static method:
+ /// static size_t getExtraSize(size_t);
+ /// The argument to this method will be the value N, which will also
+ /// be passed as the first argument to the constructor.
+ ///
+ /// The data stored in the extra storage must obey the same
+ /// restrictions as normal cleanup member data.
+ ///
+ /// The pointer returned from this method is valid until the cleanup
+ /// stack is modified.
+ template <class T, class... As>
+ T *pushCleanupWithExtra(CleanupKind kind, size_t n, As... a) {
+ static_assert(alignof(T) <= ScopeStackAlignment,
+ "Cleanup's alignment is too large.");
+ void *buffer = pushCleanup(kind, sizeof(T) + T::getExtraSize(n));
+ return new (buffer) T(n, a...);
+ }
+
void setCGF(CIRGenFunction *inCGF) { cgf = inCGF; }
/// Pops a cleanup scope off the stack. This is private to CIRGenCleanup.cpp.
diff --git a/clang/test/CIR/CodeGen/new-delete.cpp b/clang/test/CIR/CodeGen/new-delete.cpp
new file mode 100644
index 0000000000000..58db8f8646f4c
--- /dev/null
+++ b/clang/test/CIR/CodeGen/new-delete.cpp
@@ -0,0 +1,164 @@
+// RUN: %clang_cc1 -no-enable-noundef-analysis %s -triple=x86_64-linux-gnu -fclangir -emit-cir -std=c++98 -fcxx-exceptions -fexceptions -o %t.cir
+// RUN: FileCheck -check-prefixes=CIR --input-file=%t.cir %s
+// RUN: %clang_cc1 -no-enable-noundef-analysis %s -triple=x86_64-linux-gnu -fclangir -emit-llvm -std=c++98 -fcxx-exceptions -fexceptions -o %t-cir.ll
+// RUN: FileCheck -check-prefixes=LLVM --input-file=%t-cir.ll %s
+// RUN: %clang_cc1 -no-enable-noundef-analysis %s -triple=x86_64-linux-gnu -emit-llvm -std=c++98 -fcxx-exceptions -fexceptions -o %t.ll
+// RUN: FileCheck -check-prefixes=OGCG --input-file=%t.ll %s
+
+
+struct A { A(int); ~A(); void *p; };
+
+A *a() {
+ return new A(5);
+}
+
+// CIR: cir.func {{.*}} @_Z1av() -> !cir.ptr<!rec_A> {
+// CIR: %[[RETVAL:.*]] = cir.alloca !cir.ptr<!rec_A>, !cir.ptr<!cir.ptr<!rec_A>>, ["__retval"]
+// CIR: %[[NEW_RESULT:.*]] = cir.alloca !cir.ptr<!rec_A>, !cir.ptr<!cir.ptr<!rec_A>>, ["__new_result"]
+// CIR: %[[ALLOC_SIZE:.*]] = cir.const #cir.int<8> : !u64i
+// CIR: %[[PTR:.*]] = cir.call @_Znwm(%[[ALLOC_SIZE]])
+// CIR: cir.cleanup.scope {
+// CIR: %[[PTR_A:.*]] = cir.cast bitcast %[[PTR]] : !cir.ptr<!void> -> !cir.ptr<!rec_A>
+// CIR: cir.store{{.*}} %[[PTR_A]], %[[NEW_RESULT]] : !cir.ptr<!rec_A>, !cir.ptr<!cir.ptr<!rec_A>>
+// CIR: %[[FIVE:.*]] = cir.const #cir.int<5> : !s32i
+// CIR: cir.call @_ZN1AC1Ei(%[[PTR_A]], %[[FIVE]])
+// CIR: cir.yield
+// CIR: } cleanup eh {
+// CIR: cir.call @_ZdlPv(%[[PTR]]) nothrow : (!cir.ptr<!void>) -> ()
+// CIR: cir.yield
+// CIR: }
+
+// LLVM: define {{.*}} ptr @_Z1av() {{.*}} personality ptr @__gxx_personality_v0 {
+// LLVM: %[[RETVAL:.*]] = alloca ptr
+// LLVM: %[[NEW_RESULT:.*]] = alloca ptr
+// LLVM: %[[PTR:.*]] = call ptr @_Znwm(i64 8)
+// LLVM: br label %[[EH_SCOPE:.*]]
+// LLVM: [[EH_SCOPE]]:
+// LLVM: store ptr %[[PTR]], ptr %[[NEW_RESULT]]
+// LLVM: invoke void @_ZN1AC1Ei(ptr %[[PTR]], i32 5)
+// LLVM: to label %[[INVOKE_CONT:.*]] unwind label %[[UNWIND:.*]]
+// LLVM: [[INVOKE_CONT]]:
+// LLVM: br label %[[EH_SCOPE_END:.*]]
+// LLVM: [[UNWIND]]:
+// LLVM: %[[EXN:.*]] = landingpad { ptr, i32 }
+// LLVM: cleanup
+// LLVM: %[[EXN_PTR:.*]] = extractvalue { ptr, i32 } %[[EXN]], 0
+// LLVM: %[[TYPEID:.*]] = extractvalue { ptr, i32 } %[[EXN]], 1
+// LLVM: br label %[[EH_CLEANUP:.*]]
+// LLVM: [[EH_CLEANUP]]:
+// LLVM: %[[EXN_PTR_PHI:.*]] = phi ptr [ %[[EXN_PTR]], %[[UNWIND]] ]
+// LLVM: %[[TYPEID_PHI:.*]] = phi i32 [ %[[TYPEID]], %[[UNWIND]] ]
+// LLVM: call void @_ZdlPv(ptr %[[PTR]])
+// LLVM: %[[EXN_INSERT:.*]] = insertvalue { ptr, i32 } poison, ptr %[[EXN_PTR_PHI]], 0
+// LLVM: %[[EXN_INSERT_2:.*]] = insertvalue { ptr, i32 } %[[EXN_INSERT]], i32 %[[TYPEID_PHI]], 1
+// LLVM: resume { ptr, i32 } %[[EXN_INSERT_2]]
+// LLVM: [[EH_SCOPE_END]]:
+// LLVM: %[[LOAD:.*]] = load ptr, ptr %[[NEW_RESULT]]
+// LLVM: store ptr %[[LOAD]], ptr %[[RETVAL]]
+// LLVM: %[[RET:.*]] = load ptr, ptr %[[RETVAL]]
+// LLVM: ret ptr %[[RET]]
+
+// OGCG: define {{.*}} ptr @_Z1av() {{.*}} personality ptr @__gxx_personality_v0 {
+// OGCG: %[[EXN_SLOT:.*]] = alloca ptr
+// OGCG: %[[EHSELECTOR_SLOT:.*]] = alloca i32
+// OGCG: %[[PTR:.*]] = call {{.*}} ptr @_Znwm(i64 8)
+// OGCG: invoke void @_ZN1AC1Ei(ptr {{.*}} %[[PTR]], i32 5)
+// OGCG: to label %[[INVOKE_CONT:.*]] unwind label %[[UNWIND:.*]]
+// OGCG: [[INVOKE_CONT]]:
+// OGCG: ret ptr %[[PTR]]
+// OGCG: [[UNWIND]]:
+// OGCG: %[[EXN:.*]] = landingpad { ptr, i32 }
+// OGCG: cleanup
+// OGCG: %[[EXN_PTR:.*]] = extractvalue { ptr, i32 } %[[EXN]], 0
+// OGCG: store ptr %[[EXN_PTR]], ptr %[[EXN_SLOT]]
+// OGCG: %[[TYPEID:.*]] = extractvalue { ptr, i32 } %[[EXN]], 1
+// OGCG: store i32 %[[TYPEID]], ptr %[[EHSELECTOR_SLOT]]
+// OGCG: call void @_ZdlPv(ptr %[[PTR]])
+// OGCG: br label %[[EH_RESUME:.*]]
+// OGCG: [[EH_RESUME]]:
+// OGCG: %[[EXN_PTR:.*]] = load ptr, ptr %[[EXN_SLOT]]
+// OGCG: %[[EHSELECTOR:.*]] = load i32, ptr %[[EHSELECTOR_SLOT]]
+// OGCG: %[[EXN_INSERT:.*]] = insertvalue { ptr, i32 } poison, ptr %[[EXN_PTR]], 0
+// OGCG: %[[EXN_INSERT_2:.*]] = insertvalue { ptr, i32 } %[[EXN_INSERT]], i32 %[[EHSELECTOR]], 1
+// OGCG: resume { ptr, i32 } %[[EXN_INSERT_2]]
+
+A *b() {
+ extern int foo();
+ return new A(foo());
+}
+
+// CIR: cir.func {{.*}} @_Z1bv() -> !cir.ptr<!rec_A> {
+// CIR: %[[RETVAL:.*]] = cir.alloca !cir.ptr<!rec_A>, !cir.ptr<!cir.ptr<!rec_A>>, ["__retval"]
+// CIR: %[[NEW_RESULT:.*]] = cir.alloca !cir.ptr<!rec_A>, !cir.ptr<!cir.ptr<!rec_A>>, ["__new_result"]
+// CIR: %[[ALLOC_SIZE:.*]] = cir.const #cir.int<8> : !u64i
+// CIR: %[[PTR:.*]] = cir.call @_Znwm(%[[ALLOC_SIZE]])
+// CIR: cir.cleanup.scope {
+// CIR: %[[PTR_A:.*]] = cir.cast bitcast %[[PTR]] : !cir.ptr<!void> -> !cir.ptr<!rec_A>
+// CIR: cir.store{{.*}} %[[PTR_A]], %[[NEW_RESULT]] : !cir.ptr<!rec_A>, !cir.ptr<!cir.ptr<!rec_A>>
+// CIR: %[[FOO:.*]] = cir.call @_Z3foov() : () -> !s32i
+// CIR: cir.call @_ZN1AC1Ei(%[[PTR_A]], %[[FOO]])
+// CIR: cir.yield
+// CIR: } cleanup eh {
+// CIR: cir.call @_ZdlPv(%[[PTR]]) nothrow : (!cir.ptr<!void>) -> ()
+// CIR: cir.yield
+// CIR: }
+
+// LLVM: define {{.*}} ptr @_Z1bv() {{.*}} personality ptr @__gxx_personality_v0 {
+// LLVM: %[[RETVAL:.*]] = alloca ptr
+// LLVM: %[[NEW_RESULT:.*]] = alloca ptr
+// LLVM: %[[PTR:.*]] = call ptr @_Znwm(i64 8)
+// LLVM: br label %[[EH_SCOPE:.*]]
+// LLVM: [[EH_SCOPE]]:
+// LLVM: store ptr %[[PTR]], ptr %[[NEW_RESULT]]
+// LLVM: %[[FOO:.*]] = invoke i32 @_Z3foov()
+// LLVM: to label %[[INVOKE_CONT:.*]] unwind label %[[UNWIND:.*]]
+// LLVM: [[INVOKE_CONT]]:
+// LLVM: invoke void @_ZN1AC1Ei(ptr %[[PTR]], i32 %[[FOO]])
+// LLVM: to label %[[INVOKE_CONT_2:.*]] unwind label %[[UNWIND:.*]]
+// LLVM: [[INVOKE_CONT_2]]:
+// LLVM: br label %[[EH_SCOPE_END:.*]]
+// LLVM: [[UNWIND]]:
+// LLVM: %[[EXN:.*]] = landingpad { ptr, i32 }
+// LLVM: cleanup
+// LLVM: %[[EXN_PTR:.*]] = extractvalue { ptr, i32 } %[[EXN]], 0
+// LLVM: %[[TYPEID:.*]] = extractvalue { ptr, i32 } %[[EXN]], 1
+// LLVM: br label %[[EH_CLEANUP:.*]]
+// LLVM: [[EH_CLEANUP]]:
+// LLVM: %[[EXN_PTR_PHI:.*]] = phi ptr [ %[[EXN_PTR]], %[[UNWIND]] ]
+// LLVM: %[[TYPEID_PHI:.*]] = phi i32 [ %[[TYPEID]], %[[UNWIND]] ]
+// LLVM: call void @_ZdlPv(ptr %[[PTR]])
+// LLVM: %[[EXN_INSERT:.*]] = insertvalue { ptr, i32 } poison, ptr %[[EXN_PTR_PHI]], 0
+// LLVM: %[[EXN_INSERT_2:.*]] = insertvalue { ptr, i32 } %[[EXN_INSERT]], i32 %[[TYPEID_PHI]], 1
+// LLVM: resume { ptr, i32 } %[[EXN_INSERT_2]]
+// LLVM: [[EH_SCOPE_END]]:
+// LLVM: %[[LOAD:.*]] = load ptr, ptr %[[NEW_RESULT]]
+// LLVM: store ptr %[[LOAD]], ptr %[[RETVAL]]
+// LLVM: %[[RET:.*]] = load ptr, ptr %[[RETVAL]]
+// LLVM: ret ptr %[[RET]]
+
+// OGCG: define {{.*}} ptr @_Z1bv() {{.*}} personality ptr @__gxx_personality_v0 {
+// OGCG: %[[EXN_SLOT:.*]] = alloca ptr
+// OGCG: %[[EHSELECTOR_SLOT:.*]] = alloca i32
+// OGCG: %[[PTR:.*]] = call {{.*}} ptr @_Znwm(i64 8)
+// OGCG: %[[FOO:.*]] = invoke i32 @_Z3foov()
+// OGCG: to label %[[INVOKE_CONT:.*]] unwind label %[[UNWIND:.*]]
+// OGCG: [[INVOKE_CONT]]:
+// OGCG: invoke void @_ZN1AC1Ei(ptr {{.*}} %[[PTR]], i32 %[[FOO]])
+// OGCG: to label %[[INVOKE_CONT_2:.*]] unwind label %[[UNWIND:.*]]
+// OGCG: [[INVOKE_CONT_2]]:
+// OGCG: ret ptr %[[PTR]]
+// OGCG: [[UNWIND]]:
+// OGCG: %[[EXN:.*]] = landingpad { ptr, i32 }
+// OGCG: cleanup
+// OGCG: %[[EXN_PTR:.*]] = extractvalue { ptr, i32 } %[[EXN]], 0
+// OGCG: store ptr %[[EXN_PTR]], ptr %[[EXN_SLOT]]
+// OGCG: %[[TYPEID:.*]] = extractvalue { ptr, i32 } %[[EXN]], 1
+// OGCG: store i32 %[[TYPEID]], ptr %[[EHSELECTOR_SLOT]]
+// OGCG: call void @_ZdlPv(ptr %[[PTR]])
+// OGCG: br label %[[EH_RESUME:.*]]
+// OGCG: [[EH_RESUME]]:
+// OGCG: %[[EXN_PTR:.*]] = load ptr, ptr %[[EXN_SLOT]]
+// OGCG: %[[EHSELECTOR:.*]] = load i32, ptr %[[EHSELECTOR_SLOT]]
+// OGCG: %[[EXN_INSERT:.*]] = insertvalue { ptr, i32 } poison, ptr %[[EXN_PTR]], 0
+// OGCG: %[[EXN_INSERT_2:.*]] = insertvalue { ptr, i32 } %[[EXN_INSERT]], i32 %[[EHSELECTOR]], 1
+// OGCG: resume { ptr, i32 } %[[EXN_INSERT_2]]
More information about the cfe-commits
mailing list