[clang] [CIR] Handle return with cleanups (PR #163849)
Andy Kaylor via cfe-commits
cfe-commits at lists.llvm.org
Thu Oct 16 11:54:47 PDT 2025
https://github.com/andykaylor created https://github.com/llvm/llvm-project/pull/163849
This adds support for branching through a cleanup block when a return statement is encountered while we're in a scope with cleanups.
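As a concrete example, consider the new test added to vla.c below. Before this change, a return that had to leave a scope with a pending cleanup (here, the stack-restore for the VLA) hit the "return with cleanup stack" NYI error; now the branch to the return block is threaded through the cleanup:

    int f5(unsigned long len) {
      int arr[len];    // pushes a stack-save/stack-restore cleanup
      return arr[2];   // the return now branches through that cleanup
    }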
>From a7dc9ef74beb523866a15af0cd8092f80113ae6d Mon Sep 17 00:00:00 2001
From: Andy Kaylor <akaylor at nvidia.com>
Date: Thu, 16 Oct 2025 11:32:59 -0700
Subject: [PATCH] [CIR] Handle return with cleanups
This adds support for branching through a cleanup block when a return
statement is encountered while we're in a scope with cleanups.
---
clang/include/clang/CIR/MissingFeatures.h | 5 +
clang/lib/CIR/CodeGen/CIRGenCleanup.cpp | 220 +++++++++++++++++++++-
clang/lib/CIR/CodeGen/CIRGenCleanup.h | 25 ++-
clang/lib/CIR/CodeGen/CIRGenFunction.h | 45 +++++
clang/lib/CIR/CodeGen/CIRGenStmt.cpp | 120 +++++++-----
clang/lib/CIR/CodeGen/EHScopeStack.h | 82 +++++++-
clang/test/CIR/CodeGen/dtors.cpp | 12 +-
clang/test/CIR/CodeGen/lambda.cpp | 24 +--
clang/test/CIR/CodeGen/statement-exprs.c | 10 +-
clang/test/CIR/CodeGen/vla.c | 59 +++++-
10 files changed, 521 insertions(+), 81 deletions(-)
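As the updated tests below show, the return value is now stored to __retval inside the scope (rather than yielded out of it), the scope's cleanups run, and control then branches to the shared return block. A simplified sketch of the resulting CIR, with illustrative names:

    cir.scope {
      ...
      cir.store %result, %__retval   // store the return value
      cir.call @dtor(...)            // cleanups run before leaving the scope
    }
    cir.br ^ret                      // branch threaded through the cleanup
    ^ret:
      %v = cir.load %__retval
      cir.return %v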
diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h
index 4fbae150b587e..4a5027f22d054 100644
--- a/clang/include/clang/CIR/MissingFeatures.h
+++ b/clang/include/clang/CIR/MissingFeatures.h
@@ -210,6 +210,9 @@ struct MissingFeatures {
static bool checkBitfieldClipping() { return false; }
static bool cirgenABIInfo() { return false; }
static bool cleanupAfterErrorDiags() { return false; }
+ static bool cleanupAppendInsts() { return false; }
+ static bool cleanupBranchThrough() { return false; }
+ static bool cleanupIndexAndBIAdjustment() { return false; }
static bool cleanupsToDeactivate() { return false; }
static bool constEmitterAggILE() { return false; }
static bool constEmitterArrayILE() { return false; }
@@ -230,6 +233,7 @@ struct MissingFeatures {
static bool deleteArray() { return false; }
static bool devirtualizeMemberFunction() { return false; }
static bool ehCleanupFlags() { return false; }
+ static bool ehCleanupHasPrebranchedFallthrough() { return false; }
static bool ehCleanupScope() { return false; }
static bool ehCleanupScopeRequiresEHCleanup() { return false; }
static bool ehCleanupBranchFixups() { return false; }
@@ -285,6 +289,7 @@ struct MissingFeatures {
static bool setNonGC() { return false; }
static bool setObjCGCLValueClass() { return false; }
static bool setTargetAttributes() { return false; }
+ static bool simplifyCleanupEntry() { return false; }
static bool sourceLanguageCases() { return false; }
static bool stackBase() { return false; }
static bool stackSaveOp() { return false; }
diff --git a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp
index 870069715df22..b19ff426805db 100644
--- a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp
@@ -28,6 +28,46 @@ using namespace clang::CIRGen;
// CIRGenFunction cleanup related
//===----------------------------------------------------------------------===//
+/// Build an unconditional branch to the lexical scope cleanup block,
+/// or to the labeled block if it has already been resolved.
+///
+/// On a per-scope basis, track the gotos that we need to fix up later.
+cir::BrOp CIRGenFunction::emitBranchThroughCleanup(mlir::Location loc,
+ JumpDest dest) {
+ // Insert a branch: to the cleanup block (unresolved) or to the already
+ // materialized label. Keep track of unresolved gotos.
+ assert(dest.getBlock() && "assumes incoming valid dest");
+ auto brOp = cir::BrOp::create(builder, loc, dest.getBlock());
+
+ // Calculate the innermost active normal cleanup.
+ EHScopeStack::stable_iterator topCleanup =
+ ehStack.getInnermostActiveNormalCleanup();
+
+ // If we're not in an active normal cleanup scope, or if the
+ // destination scope is within the innermost active normal cleanup
+ // scope, we don't need to worry about fixups.
+ if (topCleanup == ehStack.stable_end() ||
+ topCleanup.encloses(dest.getScopeDepth())) { // works for invalid
+ // FIXME(cir): should we clear insertion point here?
+ return brOp;
+ }
+
+ // If we can't resolve the destination cleanup scope, just add this
+ // to the current cleanup scope as a branch fixup.
+ if (!dest.getScopeDepth().isValid()) {
+ BranchFixup &fixup = ehStack.addBranchFixup();
+ fixup.destination = dest.getBlock();
+ fixup.destinationIndex = dest.getDestIndex();
+ fixup.initialBranch = brOp;
+ fixup.optimisticBranchBlock = nullptr;
+ // FIXME(cir): should we clear insertion point here?
+ return brOp;
+ }
+
+ cgm.errorNYI(loc, "emitBranchThroughCleanup: valid destination scope depth");
+ return brOp;
+}
+
/// Emits all the code to cause the given temporary to be cleaned up.
void CIRGenFunction::emitCXXTemporary(const CXXTemporary *temporary,
QualType tempType, Address ptr) {
@@ -40,6 +80,19 @@ void CIRGenFunction::emitCXXTemporary(const CXXTemporary *temporary,
void EHScopeStack::Cleanup::anchor() {}
+EHScopeStack::stable_iterator
+EHScopeStack::getInnermostActiveNormalCleanup() const {
+ stable_iterator si = getInnermostNormalCleanup();
+ stable_iterator se = stable_end();
+ while (si != se) {
+ EHCleanupScope &cleanup = llvm::cast<EHCleanupScope>(*find(si));
+ if (cleanup.isActive())
+ return si;
+ si = cleanup.getEnclosingNormalCleanup();
+ }
+ return stable_end();
+}
+
/// Push an entry of the given size onto this protected-scope stack.
char *EHScopeStack::allocate(size_t size) {
size = llvm::alignTo(size, ScopeStackAlignment);
@@ -75,14 +128,30 @@ void EHScopeStack::deallocate(size_t size) {
startOfData += llvm::alignTo(size, ScopeStackAlignment);
}
+/// Remove any 'null' fixups on the stack. However, we can't pop more
+/// fixups than the fixup depth on the innermost normal cleanup, or
+/// else fixups that we try to add to that cleanup will end up in the
+/// wrong place. We *could* try to shrink fixup depths, but that's
+/// actually a lot of work for little benefit.
+void EHScopeStack::popNullFixups() {
+ // We expect this to only be called when there's still an innermost
+ // normal cleanup; otherwise there really shouldn't be any fixups.
+ cgf->cgm.errorNYI("popNullFixups");
+}
+
void *EHScopeStack::pushCleanup(CleanupKind kind, size_t size) {
char *buffer = allocate(EHCleanupScope::getSizeForCleanupSize(size));
+ bool isNormalCleanup = kind & NormalCleanup;
bool isEHCleanup = kind & EHCleanup;
bool isLifetimeMarker = kind & LifetimeMarker;
assert(!cir::MissingFeatures::innermostEHScope());
- EHCleanupScope *scope = new (buffer) EHCleanupScope(size);
+ EHCleanupScope *scope = new (buffer)
+ EHCleanupScope(size, branchFixups.size(), innermostNormalCleanup);
+
+ if (isNormalCleanup)
+ innermostNormalCleanup = stable_begin();
if (isLifetimeMarker)
cgf->cgm.errorNYI("push lifetime marker cleanup");
@@ -100,12 +169,23 @@ void EHScopeStack::popCleanup() {
assert(isa<EHCleanupScope>(*begin()));
EHCleanupScope &cleanup = cast<EHCleanupScope>(*begin());
+ innermostNormalCleanup = cleanup.getEnclosingNormalCleanup();
deallocate(cleanup.getAllocatedSize());
// Destroy the cleanup.
cleanup.destroy();
- assert(!cir::MissingFeatures::ehCleanupBranchFixups());
+ // Check whether we can shrink the branch-fixups stack.
+ if (!branchFixups.empty()) {
+ // If we no longer have any normal cleanups, all the fixups are
+ // complete.
+ if (!hasNormalCleanups()) {
+ branchFixups.clear();
+ } else {
+ // Otherwise we can still trim out unnecessary nulls.
+ popNullFixups();
+ }
+ }
}
static void emitCleanup(CIRGenFunction &cgf, EHScopeStack::Cleanup *cleanup) {
@@ -116,6 +196,18 @@ static void emitCleanup(CIRGenFunction &cgf, EHScopeStack::Cleanup *cleanup) {
assert(cgf.haveInsertPoint() && "cleanup ended with no insertion point?");
}
+static mlir::Block *createNormalEntry(CIRGenFunction &cgf,
+ EHCleanupScope &scope) {
+ assert(scope.isNormalCleanup());
+ mlir::Block *entry = scope.getNormalBlock();
+ if (!entry) {
+ mlir::OpBuilder::InsertionGuard guard(cgf.getBuilder());
+ entry = cgf.curLexScope->getOrCreateCleanupBlock(cgf.getBuilder());
+ scope.setNormalBlock(entry);
+ }
+ return entry;
+}
+
/// Pops a cleanup block. If the block includes a normal cleanup, the
/// current insertion point is threaded through the cleanup, as are
/// any branch fixups on the cleanup.
@@ -123,17 +215,21 @@ void CIRGenFunction::popCleanupBlock() {
assert(!ehStack.empty() && "cleanup stack is empty!");
assert(isa<EHCleanupScope>(*ehStack.begin()) && "top not a cleanup!");
EHCleanupScope &scope = cast<EHCleanupScope>(*ehStack.begin());
+ assert(scope.getFixupDepth() <= ehStack.getNumBranchFixups());
// Remember activation information.
bool isActive = scope.isActive();
- assert(!cir::MissingFeatures::ehCleanupBranchFixups());
+ // - whether there are branch fix-ups through this cleanup
+ unsigned fixupDepth = scope.getFixupDepth();
+ bool hasFixups = ehStack.getNumBranchFixups() != fixupDepth;
// - whether there's a fallthrough
mlir::Block *fallthroughSource = builder.getInsertionBlock();
bool hasFallthrough = fallthroughSource != nullptr && isActive;
- bool requiresNormalCleanup = scope.isNormalCleanup() && hasFallthrough;
+ bool requiresNormalCleanup =
+ scope.isNormalCleanup() && (hasFixups || hasFallthrough);
// If we don't need the cleanup at all, we're done.
assert(!cir::MissingFeatures::ehCleanupScopeRequiresEHCleanup());
@@ -168,9 +264,119 @@ void CIRGenFunction::popCleanupBlock() {
assert(!cir::MissingFeatures::ehCleanupFlags());
- ehStack.popCleanup();
- scope.markEmitted();
- emitCleanup(*this, cleanup);
+ // If we have a fallthrough and no other need for the cleanup,
+ // emit it directly.
+ if (hasFallthrough && !hasFixups) {
+ assert(!cir::MissingFeatures::ehCleanupScopeRequiresEHCleanup());
+ ehStack.popCleanup();
+ scope.markEmitted();
+ emitCleanup(*this, cleanup);
+ } else {
+ // Otherwise, the best approach is to thread everything through
+ // the cleanup block and then try to clean up after ourselves.
+
+ // Force the entry block to exist.
+ mlir::Block *normalEntry = createNormalEntry(*this, scope);
+
+ // I. Set up the fallthrough edge in.
+ mlir::OpBuilder::InsertPoint savedInactiveFallthroughIP;
+
+ // If there's a fallthrough, we need to store the cleanup
+ // destination index. For fall-throughs this is always zero.
+ if (hasFallthrough) {
+ assert(!cir::MissingFeatures::ehCleanupHasPrebranchedFallthrough());
+
+ } else if (fallthroughSource) {
+ // Otherwise, save and clear the IP if we don't have fallthrough
+ // because the cleanup is inactive.
+ assert(!isActive && "source without fallthrough for active cleanup");
+ savedInactiveFallthroughIP = builder.saveInsertionPoint();
+ }
+
+ // II. Emit the entry block. This implicitly branches to it if
+ // we have fallthrough. All the fixups and existing branches
+ // should already be branched to it.
+ builder.setInsertionPointToEnd(normalEntry);
+
+ // intercept normal cleanup to mark SEH scope end
+ assert(!cir::MissingFeatures::ehCleanupScopeRequiresEHCleanup());
+
+ // III. Figure out where we're going and build the cleanup
+ // epilogue.
+ bool hasEnclosingCleanups =
+ (scope.getEnclosingNormalCleanup() != ehStack.stable_end());
+
+ // Compute the branch-through dest if we need it:
+ // - if there are branch-throughs threaded through the scope
+ // - if fall-through is a branch-through
+ // - if there are fixups that will be optimistically forwarded
+ // to the enclosing cleanup
+ assert(!cir::MissingFeatures::cleanupBranchThrough());
+ if (hasFixups && hasEnclosingCleanups)
+ cgm.errorNYI("cleanup branch-through dest");
+
+ mlir::Block *fallthroughDest = nullptr;
+
+ // If there's exactly one branch-after and no other threads,
+ // we can route it without a switch.
+ // Skip for SEH, since ExitSwitch is used to generate code to indicate
+ // abnormal termination. (SEH: Except _leave and fall-through at
+ // the end, all other exits in a _try (return/goto/continue/break)
+ // are considered as abnormal terminations, using NormalCleanupDestSlot
+ // to indicate abnormal termination)
+ assert(!cir::MissingFeatures::cleanupBranchThrough());
+ assert(!cir::MissingFeatures::ehCleanupScopeRequiresEHCleanup());
+
+ // IV. Pop the cleanup and emit it.
+ scope.markEmitted();
+ ehStack.popCleanup();
+ assert(ehStack.hasNormalCleanups() == hasEnclosingCleanups);
+
+ emitCleanup(*this, cleanup);
+
+ // Append the prepared cleanup prologue from above.
+ assert(!cir::MissingFeatures::cleanupAppendInsts());
+
+ // Optimistically hope that any fixups will continue falling through.
+ if (fixupDepth != ehStack.getNumBranchFixups())
+ cgm.errorNYI("cleanup fixup depth mismatch");
+
+ // V. Set up the fallthrough edge out.
+
+ // Case 1: a fallthrough source exists but doesn't branch to the
+ // cleanup because the cleanup is inactive.
+ if (!hasFallthrough && fallthroughSource) {
+ // Prebranched fallthrough was forwarded earlier.
+ // Non-prebranched fallthrough doesn't need to be forwarded.
+ // Either way, all we need to do is restore the IP we cleared before.
+ assert(!isActive);
+ cgm.errorNYI("cleanup inactive fallthrough");
+
+ // Case 2: a fallthrough source exists and should branch to the
+ // cleanup, but we're not supposed to branch through to the next
+ // cleanup.
+ } else if (hasFallthrough && fallthroughDest) {
+ cgm.errorNYI("cleanup fallthrough destination");
+
+ // Case 3: a fallthrough source exists and should branch to the
+ // cleanup and then through to the next.
+ } else if (hasFallthrough) {
+ // Everything is already set up for this.
+
+ // Case 4: no fallthrough source exists.
+ } else {
+ // FIXME(cir): should we clear insertion point here?
+ }
+
+ // VI. Assorted cleaning.
+
+ // Check whether we can merge NormalEntry into a single predecessor.
+ // This might invalidate (non-IR) pointers to NormalEntry.
+ //
+ // If it did invalidate those pointers, and normalEntry was the same
+ // as NormalExit, go back and patch up the fixups.
+ assert(!cir::MissingFeatures::simplifyCleanupEntry());
+ }
}
/// Pops cleanup blocks until the given savepoint is reached.
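Condensed, the popCleanupBlock decision above boils down to roughly the following (a paraphrase of the code, not new behavior):

    bool hasFixups = ehStack.getNumBranchFixups() != scope.getFixupDepth();
    bool hasFallthrough = builder.getInsertionBlock() != nullptr && isActive;
    if (!scope.isNormalCleanup() || (!hasFixups && !hasFallthrough)) {
      // Nothing threads through this cleanup; no normal emission needed.
    } else if (hasFallthrough && !hasFixups) {
      // Fast path: pop the scope and emit the cleanup inline.
    } else {
      // Thread all edges through the lazily created normal entry block,
      // emit the cleanup there, and then fix up the fallthrough edge out.
    }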
diff --git a/clang/lib/CIR/CodeGen/CIRGenCleanup.h b/clang/lib/CIR/CodeGen/CIRGenCleanup.h
index 30f5607d655da..63ccc65e53dac 100644
--- a/clang/lib/CIR/CodeGen/CIRGenCleanup.h
+++ b/clang/lib/CIR/CodeGen/CIRGenCleanup.h
@@ -72,6 +72,18 @@ class EHScope {
/// A cleanup scope which generates the cleanup blocks lazily.
class alignas(EHScopeStack::ScopeStackAlignment) EHCleanupScope
: public EHScope {
+ /// The nearest normal cleanup scope enclosing this one.
+ EHScopeStack::stable_iterator enclosingNormal;
+
+ /// The dual entry/exit block along the normal edge. This is lazily
+ /// created if needed before the cleanup is popped.
+ mlir::Block *normalBlock = nullptr;
+
+ /// The number of fixups required by enclosing scopes (not including
+ /// this one). If this is the top cleanup scope, all the fixups
+ /// from this index onwards belong to this scope.
+ unsigned fixupDepth = 0;
+
public:
/// Gets the size required for a lazy cleanup scope with the given
/// cleanup-data requirements.
@@ -83,7 +95,10 @@ class alignas(EHScopeStack::ScopeStackAlignment) EHCleanupScope
return sizeof(EHCleanupScope) + cleanupBits.cleanupSize;
}
- EHCleanupScope(unsigned cleanupSize) : EHScope(EHScope::Cleanup) {
+ EHCleanupScope(unsigned cleanupSize, unsigned fixupDepth,
+ EHScopeStack::stable_iterator enclosingNormal)
+ : EHScope(EHScope::Cleanup), enclosingNormal(enclosingNormal),
+ fixupDepth(fixupDepth) {
// TODO(cir): When exception handling is upstreamed, isNormalCleanup and
// isEHCleanup will be arguments to the constructor.
cleanupBits.isNormalCleanup = true;
@@ -101,11 +116,19 @@ class alignas(EHScopeStack::ScopeStackAlignment) EHCleanupScope
// Objects of EHCleanupScope are not destructed. Use destroy().
~EHCleanupScope() = delete;
+ mlir::Block *getNormalBlock() const { return normalBlock; }
+ void setNormalBlock(mlir::Block *bb) { normalBlock = bb; }
+
bool isNormalCleanup() const { return cleanupBits.isNormalCleanup; }
bool isActive() const { return cleanupBits.isActive; }
void setActive(bool isActive) { cleanupBits.isActive = isActive; }
+ unsigned getFixupDepth() const { return fixupDepth; }
+ EHScopeStack::stable_iterator getEnclosingNormalCleanup() const {
+ return enclosingNormal;
+ }
+
size_t getCleanupSize() const { return cleanupBits.cleanupSize; }
void *getCleanupBuffer() { return this + 1; }
diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h
index 0d64c31f01668..c6e05c45ea3aa 100644
--- a/clang/lib/CIR/CodeGen/CIRGenFunction.h
+++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h
@@ -60,11 +60,44 @@ class CIRGenFunction : public CIRGenTypeCache {
/// is where the next operations will be introduced.
CIRGenBuilderTy &builder;
+ /// A jump destination is an abstract label, branching to which may
+ /// require a jump out through normal cleanups.
+ struct JumpDest {
+ JumpDest() = default;
+ JumpDest(mlir::Block *block, EHScopeStack::stable_iterator depth = {},
+ unsigned index = 0)
+ : block(block) {} // depth/index are not stored yet; leaving scopeDepth
+ // invalid routes branches through the fixup path in
+ // emitBranchThroughCleanup.
+
+ bool isValid() const { return block != nullptr; }
+ mlir::Block *getBlock() const { return block; }
+ EHScopeStack::stable_iterator getScopeDepth() const { return scopeDepth; }
+ unsigned getDestIndex() const { return index; }
+
+ // This should be used cautiously.
+ void setScopeDepth(EHScopeStack::stable_iterator depth) {
+ scopeDepth = depth;
+ }
+
+ private:
+ mlir::Block *block = nullptr;
+ EHScopeStack::stable_iterator scopeDepth;
+ unsigned index = 0;
+ };
+
public:
/// The GlobalDecl for the current function being compiled or the global
/// variable currently being initialized.
clang::GlobalDecl curGD;
+ /// Get the unified return block for the current scope.
+ /// In CIR this is a function rather than a single cached block because
+ /// each scope may have its own associated return block.
+ JumpDest returnBlock(mlir::Block *retBlock) {
+ return getJumpDestInCurrentScope(retBlock);
+ }
+
+ unsigned nextCleanupDestIndex = 1;
+
/// The compiler-generated variable that holds the return value.
std::optional<mlir::Value> fnRetAlloca;
@@ -574,6 +607,16 @@ class CIRGenFunction : public CIRGenTypeCache {
}
};
+ /// The given basic block lies in the current EH scope, but may be a
+ /// target of a potentially scope-crossing jump; get a stable handle
+ /// to which we can perform this jump later.
+ /// CIRGen: this mostly tracks state for figuring out the proper scope
+ /// information; no actual branches are emitted.
+ JumpDest getJumpDestInCurrentScope(mlir::Block *target) {
+ return JumpDest(target, ehStack.getInnermostNormalCleanup(),
+ nextCleanupDestIndex++);
+ }
+
/// Perform the usual unary conversions on the specified expression and
/// compare the result against zero, returning an Int1Ty value.
mlir::Value evaluateExprAsBool(const clang::Expr *e);
@@ -1192,6 +1235,8 @@ class CIRGenFunction : public CIRGenTypeCache {
LValue emitBinaryOperatorLValue(const BinaryOperator *e);
+ cir::BrOp emitBranchThroughCleanup(mlir::Location loc, JumpDest dest);
+
mlir::LogicalResult emitBreakStmt(const clang::BreakStmt &s);
RValue emitBuiltinExpr(const clang::GlobalDecl &gd, unsigned builtinID,
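Together these pieces are used as follows (this is exactly what emitReturnStmt does in the next file):

    mlir::Block *retBlock = curLexScope->getOrCreateRetBlock(*this, loc);
    // Capture the scope information for the target, then emit a cir.br
    // that is either direct or recorded as a branch fixup to be threaded
    // through enclosing cleanups when they are popped.
    emitBranchThroughCleanup(loc, returnBlock(retBlock));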
diff --git a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp
index 5ba64ddb85272..a09b6f672b0c3 100644
--- a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp
@@ -446,54 +446,88 @@ mlir::LogicalResult CIRGenFunction::emitReturnStmt(const ReturnStmt &s) {
mlir::Location loc = getLoc(s.getSourceRange());
const Expr *rv = s.getRetValue();
- if (getContext().getLangOpts().ElideConstructors && s.getNRVOCandidate() &&
- s.getNRVOCandidate()->isNRVOVariable()) {
- assert(!cir::MissingFeatures::openMP());
- assert(!cir::MissingFeatures::nrvo());
- } else if (!rv) {
- // No return expression. Do nothing.
- } else if (rv->getType()->isVoidType()) {
- // Make sure not to return anything, but evaluate the expression
- // for side effects.
- if (rv) {
- emitAnyExpr(rv);
+ RunCleanupsScope cleanupScope(*this);
+ bool createNewScope = false;
+ if (const auto *ewc = dyn_cast_or_null<ExprWithCleanups>(rv)) {
+ rv = ewc->getSubExpr();
+ createNewScope = true;
+ }
+
+ auto handleReturnVal = [&]() {
+ if (getContext().getLangOpts().ElideConstructors && s.getNRVOCandidate() &&
+ s.getNRVOCandidate()->isNRVOVariable()) {
+ assert(!cir::MissingFeatures::openMP());
+ assert(!cir::MissingFeatures::nrvo());
+ } else if (!rv) {
+ // No return expression. Do nothing.
+ } else if (rv->getType()->isVoidType()) {
+ // Make sure not to return anything, but evaluate the expression
+ // for side effects.
+ if (rv) {
+ emitAnyExpr(rv);
+ }
+ } else if (cast<FunctionDecl>(curGD.getDecl())
+ ->getReturnType()
+ ->isReferenceType()) {
+ // If this function returns a reference, take the address of the
+ // expression rather than the value.
+ RValue result = emitReferenceBindingToExpr(rv);
+ builder.CIRBaseBuilderTy::createStore(loc, result.getValue(),
+ *fnRetAlloca);
+ } else {
+ mlir::Value value = nullptr;
+ switch (CIRGenFunction::getEvaluationKind(rv->getType())) {
+ case cir::TEK_Scalar:
+ value = emitScalarExpr(rv);
+ if (value) { // Change this to an assert once emitScalarExpr is complete
+ builder.CIRBaseBuilderTy::createStore(loc, value, *fnRetAlloca);
+ }
+ break;
+ case cir::TEK_Complex:
+ getCIRGenModule().errorNYI(s.getSourceRange(),
+ "complex function return type");
+ break;
+ case cir::TEK_Aggregate:
+ assert(!cir::MissingFeatures::aggValueSlotGC());
+ emitAggExpr(rv, AggValueSlot::forAddr(returnValue, Qualifiers(),
+ AggValueSlot::IsDestructed,
+ AggValueSlot::IsNotAliased,
+ getOverlapForReturnValue()));
+ break;
+ }
}
- } else if (cast<FunctionDecl>(curGD.getDecl())
- ->getReturnType()
- ->isReferenceType()) {
- // If this function returns a reference, take the address of the
- // expression rather than the value.
- RValue result = emitReferenceBindingToExpr(rv);
- builder.CIRBaseBuilderTy::createStore(loc, result.getValue(), *fnRetAlloca);
+ };
+
+ if (!createNewScope) {
+ handleReturnVal();
} else {
- mlir::Value value = nullptr;
- switch (CIRGenFunction::getEvaluationKind(rv->getType())) {
- case cir::TEK_Scalar:
- value = emitScalarExpr(rv);
- if (value) { // Change this to an assert once emitScalarExpr is complete
- builder.CIRBaseBuilderTy::createStore(loc, value, *fnRetAlloca);
- }
- break;
- case cir::TEK_Complex:
- getCIRGenModule().errorNYI(s.getSourceRange(),
- "complex function return type");
- break;
- case cir::TEK_Aggregate:
- assert(!cir::MissingFeatures::aggValueSlotGC());
- emitAggExpr(rv, AggValueSlot::forAddr(returnValue, Qualifiers(),
- AggValueSlot::IsDestructed,
- AggValueSlot::IsNotAliased,
- getOverlapForReturnValue()));
- break;
+ mlir::Location scopeLoc =
+ getLoc(rv ? rv->getSourceRange() : s.getSourceRange());
+ // First create the cir.scope and emit its body afterwards. Otherwise, all
+ // CIRGen dispatched by `handleReturnVal()` might need to manipulate blocks
+ // and look into parents, which would all be unlinked.
+ mlir::OpBuilder::InsertPoint scopeBody;
+ cir::ScopeOp::create(builder, scopeLoc, /*scopeBuilder=*/
+ [&](mlir::OpBuilder &b, mlir::Location loc) {
+ scopeBody = b.saveInsertionPoint();
+ });
+ {
+ mlir::OpBuilder::InsertionGuard guard(builder);
+ builder.restoreInsertionPoint(scopeBody);
+ CIRGenFunction::LexicalScope lexScope{*this, scopeLoc,
+ builder.getInsertionBlock()};
+ handleReturnVal();
}
}
+ cleanupScope.forceCleanup();
+
+ // In CIR we might have returns in different scopes.
+ // FIXME(cir): cleanup code currently handles the actual return emission;
+ // the logic should match traditional codegen more closely (to the extent
+ // possible).
auto *retBlock = curLexScope->getOrCreateRetBlock(*this, loc);
- // This should emit a branch through the cleanup block if one exists.
- builder.create<cir::BrOp>(loc, retBlock);
- assert(!cir::MissingFeatures::emitBranchThroughCleanup());
- if (ehStack.stable_begin() != currentCleanupStackDepth)
- cgm.errorNYI(s.getSourceRange(), "return with cleanup stack");
+ emitBranchThroughCleanup(loc, returnBlock(retBlock));
// Insert the new block to continue codegen after branch to ret block.
builder.createBlock(builder.getBlock()->getParent());
@@ -1063,5 +1097,5 @@ void CIRGenFunction::emitReturnOfRValue(mlir::Location loc, RValue rv,
assert(!cir::MissingFeatures::emitBranchThroughCleanup());
builder.create<cir::BrOp>(loc, retBlock);
if (ehStack.stable_begin() != currentCleanupStackDepth)
- cgm.errorNYI(loc, "return with cleanup stack");
+ cgm.errorNYI(loc, "return of r-value with cleanup stack");
}
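The new ExprWithCleanups path corresponds to returns whose value involves temporaries, such as this case from the dtors.cpp test below, where the B temporaries created for the call operands must be destroyed before the function returns:

    bool make_temp(const B &);
    bool test_temp_or() { return make_temp(1) || make_temp(2); }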
diff --git a/clang/lib/CIR/CodeGen/EHScopeStack.h b/clang/lib/CIR/CodeGen/EHScopeStack.h
index 67a72f5384c32..c346095f0c7f3 100644
--- a/clang/lib/CIR/CodeGen/EHScopeStack.h
+++ b/clang/lib/CIR/CodeGen/EHScopeStack.h
@@ -18,12 +18,38 @@
#ifndef CLANG_LIB_CIR_CODEGEN_EHSCOPESTACK_H
#define CLANG_LIB_CIR_CODEGEN_EHSCOPESTACK_H
+#include "clang/CIR/Dialect/IR/CIRDialect.h"
#include "llvm/ADT/SmallVector.h"
namespace clang::CIRGen {
class CIRGenFunction;
+/// A branch fixup. These are required when emitting a goto to a
+/// label which hasn't been emitted yet. The goto is optimistically
+/// emitted as a branch to the basic block for the label, and (if it
+/// occurs in a scope with non-trivial cleanups) a fixup is added to
+/// the innermost cleanup. When a (normal) cleanup is popped, any
+/// unresolved fixups in that scope are threaded through the cleanup.
+struct BranchFixup {
+ /// The block containing the terminator which needs to be modified
+ /// into a switch if this fixup is resolved into the current scope.
+ /// If null, LatestBranch points directly to the destination.
+ mlir::Block *optimisticBranchBlock = nullptr;
+
+ /// The ultimate destination of the branch.
+ ///
+ /// This can be set to null to indicate that this fixup was
+ /// successfully resolved.
+ mlir::Block *destination = nullptr;
+
+ /// The destination index value.
+ unsigned destinationIndex = 0;
+
+ /// The initial branch of the fixup.
+ cir::BrOp initialBranch = {};
+};
+
enum CleanupKind : unsigned {
/// Denotes a cleanup that should run when a scope is exited using exceptional
/// control flow (e.g., a throw statement leading to stack unwinding).
@@ -126,9 +152,31 @@ class EHScopeStack {
/// The first valid entry in the buffer.
char *startOfData = nullptr;
+ /// The innermost normal cleanup on the stack.
+ stable_iterator innermostNormalCleanup = stable_end();
+
/// The CIRGenFunction this stack belongs to.
CIRGenFunction *cgf = nullptr;
+ /// The current set of branch fixups. A branch fixup is a jump to
+ /// an as-yet unemitted label, i.e. a label for which we don't yet
+ /// know the EH stack depth. Whenever we pop a cleanup, we have
+ /// to thread all the current branch fixups through it.
+ ///
+ /// Fixups are recorded as the Use of the respective branch or
+ /// switch statement. The use points to the final destination.
+ /// When popping out of a cleanup, these uses are threaded through
+ /// the cleanup and adjusted to point to the new cleanup.
+ ///
+ /// Note that branches are allowed to jump into protected scopes
+ /// in certain situations; e.g. the following code is legal:
+ /// struct A { ~A(); }; // trivial ctor, non-trivial dtor
+ /// goto foo;
+ /// A a;
+ /// foo:
+ /// bar();
+ llvm::SmallVector<BranchFixup> branchFixups;
+
// This class uses a custom allocator for maximum efficiency because cleanups
// are allocated and freed very frequently. It's basically a bump pointer
// allocator, but we can't use LLVM's BumpPtrAllocator because we use offsets
@@ -158,6 +206,18 @@ class EHScopeStack {
/// Determines whether the exception-scopes stack is empty.
bool empty() const { return startOfData == endOfBuffer; }
+ /// Determines whether there are any normal cleanups on the stack.
+ bool hasNormalCleanups() const {
+ return innermostNormalCleanup != stable_end();
+ }
+
+ /// Returns the innermost normal cleanup on the stack, or
+ /// stable_end() if there are no normal cleanups.
+ stable_iterator getInnermostNormalCleanup() const {
+ return innermostNormalCleanup;
+ }
+ stable_iterator getInnermostActiveNormalCleanup() const;
+
/// An unstable reference to a scope-stack depth. Invalidated by
/// pushes but not pops.
class iterator;
@@ -172,12 +232,30 @@ class EHScopeStack {
return stable_iterator(endOfBuffer - startOfData);
}
+ /// Create a stable reference to the bottom of the EH stack.
+ static stable_iterator stable_end() { return stable_iterator(0); }
+
/// Turn a stable reference to a scope depth into an unstable pointer
/// to the EH stack.
iterator find(stable_iterator savePoint) const;
- /// Create a stable reference to the bottom of the EH stack.
- static stable_iterator stable_end() { return stable_iterator(0); }
+ /// Add a branch fixup to the current cleanup scope.
+ BranchFixup &addBranchFixup() {
+ assert(hasNormalCleanups() && "adding fixup in scope without cleanups");
+ branchFixups.push_back(BranchFixup());
+ return branchFixups.back();
+ }
+
+ unsigned getNumBranchFixups() const { return branchFixups.size(); }
+ BranchFixup &getBranchFixup(unsigned i) {
+ assert(i < getNumBranchFixups());
+ return branchFixups[i];
+ }
+
+ /// Pops lazily-removed fixups from the end of the list. This
+ /// should only be called by procedures which have just popped a
+ /// cleanup or resolved one or more fixups.
+ void popNullFixups();
};
} // namespace clang::CIRGen
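For orientation, this is how emitBranchThroughCleanup (in CIRGenCleanup.cpp above) fills in a BranchFixup when the destination's scope depth cannot be resolved yet:

    BranchFixup &fixup = ehStack.addBranchFixup();
    fixup.destination = dest.getBlock();          // ultimate target block
    fixup.destinationIndex = dest.getDestIndex(); // cleanup-dest index
    fixup.initialBranch = brOp;                   // the optimistic cir.br
    fixup.optimisticBranchBlock = nullptr;        // not yet threaded through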
diff --git a/clang/test/CIR/CodeGen/dtors.cpp b/clang/test/CIR/CodeGen/dtors.cpp
index 7fb09757a27bf..36253732dd86b 100644
--- a/clang/test/CIR/CodeGen/dtors.cpp
+++ b/clang/test/CIR/CodeGen/dtors.cpp
@@ -35,7 +35,7 @@ bool make_temp(const B &) { return false; }
bool test_temp_or() { return make_temp(1) || make_temp(2); }
// CIR: cir.func{{.*}} @_Z12test_temp_orv()
-// CIR: %[[SCOPE:.*]] = cir.scope {
+// CIR: cir.scope {
// CIR: %[[REF_TMP0:.*]] = cir.alloca !rec_B, !cir.ptr<!rec_B>, ["ref.tmp0"]
// CIR: %[[ONE:.*]] = cir.const #cir.int<1>
// CIR: cir.call @_ZN1BC2Ei(%[[REF_TMP0]], %[[ONE]])
@@ -51,9 +51,9 @@ bool test_temp_or() { return make_temp(1) || make_temp(2); }
// CIR: cir.call @_ZN1BD2Ev(%[[REF_TMP1]])
// CIR: cir.yield %[[MAKE_TEMP1]] : !cir.bool
// CIR: })
+// CIR: cir.store{{.*}} %[[TERNARY]], %[[RETVAL:.*]]
// CIR: cir.call @_ZN1BD2Ev(%[[REF_TMP0]])
-// CIR: cir.yield %[[TERNARY]] : !cir.bool
-// CIR: } : !cir.bool
+// CIR: }
// LLVM: define{{.*}} i1 @_Z12test_temp_orv() {
// LLVM: %[[REF_TMP0:.*]] = alloca %struct.B
@@ -105,7 +105,7 @@ bool test_temp_or() { return make_temp(1) || make_temp(2); }
bool test_temp_and() { return make_temp(1) && make_temp(2); }
// CIR: cir.func{{.*}} @_Z13test_temp_andv()
-// CIR: %[[SCOPE:.*]] = cir.scope {
+// CIR: cir.scope {
// CIR: %[[REF_TMP0:.*]] = cir.alloca !rec_B, !cir.ptr<!rec_B>, ["ref.tmp0"]
// CIR: %[[ONE:.*]] = cir.const #cir.int<1>
// CIR: cir.call @_ZN1BC2Ei(%[[REF_TMP0]], %[[ONE]])
@@ -121,9 +121,9 @@ bool test_temp_and() { return make_temp(1) && make_temp(2); }
// CIR: %[[FALSE:.*]] = cir.const #false
// CIR: cir.yield %[[FALSE]] : !cir.bool
// CIR: })
+// CIR: cir.store{{.*}} %[[TERNARY]], %[[RETVAL:.*]]
// CIR: cir.call @_ZN1BD2Ev(%[[REF_TMP0]])
-// CIR: cir.yield %[[TERNARY]] : !cir.bool
-// CIR: } : !cir.bool
+// CIR: }
// LLVM: define{{.*}} i1 @_Z13test_temp_andv() {
// LLVM: %[[REF_TMP0:.*]] = alloca %struct.B
diff --git a/clang/test/CIR/CodeGen/lambda.cpp b/clang/test/CIR/CodeGen/lambda.cpp
index 033adc60be1ed..2f46aea8eff2a 100644
--- a/clang/test/CIR/CodeGen/lambda.cpp
+++ b/clang/test/CIR/CodeGen/lambda.cpp
@@ -219,14 +219,13 @@ int f() {
// CIR: cir.func dso_local @_Z1fv() -> !s32i
// CIR: %[[RETVAL:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["__retval"]
-// CIR: %[[SCOPE_RET:.*]] = cir.scope {
+// CIR: cir.scope {
// CIR: %[[TMP:.*]] = cir.alloca ![[REC_LAM_G2]], !cir.ptr<![[REC_LAM_G2]]>, ["ref.tmp0"]
// CIR: %[[G2:.*]] = cir.call @_Z2g2v() : () -> ![[REC_LAM_G2]]
// CIR: cir.store{{.*}} %[[G2]], %[[TMP]]
// CIR: %[[RESULT:.*]] = cir.call @_ZZ2g2vENK3$_0clEv(%[[TMP]])
-// CIR: cir.yield %[[RESULT]]
+// CIR: cir.store{{.*}} %[[RESULT]], %[[RETVAL]]
// CIR: }
-// CIR: cir.store{{.*}} %[[SCOPE_RET]], %[[RETVAL]]
// CIR: %[[RET:.*]] = cir.load{{.*}} %[[RETVAL]]
// CIR: cir.return %[[RET]]
@@ -255,10 +254,9 @@ int f() {
// LLVM: %[[G2:.*]] = call %[[REC_LAM_G2]] @_Z2g2v()
// LLVM: store %[[REC_LAM_G2]] %[[G2]], ptr %[[TMP]]
// LLVM: %[[RESULT:.*]] = call i32 @"_ZZ2g2vENK3$_0clEv"(ptr %[[TMP]])
+// LLVM: store i32 %[[RESULT]], ptr %[[RETVAL]]
// LLVM: br label %[[RET_BB:.*]]
// LLVM: [[RET_BB]]:
-// LLVM: %[[RETPHI:.*]] = phi i32 [ %[[RESULT]], %[[SCOPE_BB]] ]
-// LLVM: store i32 %[[RETPHI]], ptr %[[RETVAL]]
// LLVM: %[[RET:.*]] = load i32, ptr %[[RETVAL]]
// LLVM: ret i32 %[[RET]]
@@ -333,14 +331,13 @@ struct A {
// CIR: %[[RETVAL:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["__retval"]
// CIR: cir.store %[[THIS_ARG]], %[[THIS_ADDR]]
// CIR: %[[THIS]] = cir.load deref %[[THIS_ADDR]] : !cir.ptr<!cir.ptr<!rec_A>>, !cir.ptr<!rec_A>
-// CIR: %[[SCOPE_RET:.*]] = cir.scope {
+// CIR: cir.scope {
// CIR: %[[LAM_ADDR:.*]] = cir.alloca ![[REC_LAM_A]], !cir.ptr<![[REC_LAM_A]]>, ["ref.tmp0"]
// CIR: %[[STRUCT_A:.*]] = cir.get_member %[[LAM_ADDR]][0] {name = "this"} : !cir.ptr<![[REC_LAM_A]]> -> !cir.ptr<!rec_A>
// CIR: cir.call @_ZN1AC1ERKS_(%[[STRUCT_A]], %[[THIS]]){{.*}} : (!cir.ptr<!rec_A>, !cir.ptr<!rec_A>){{.*}} -> ()
// CIR: %[[LAM_RET:.*]] = cir.call @_ZZN1A3fooEvENKUlvE_clEv(%[[LAM_ADDR]])
-// CIR: cir.yield %[[LAM_RET]]
+// CIR: cir.store{{.*}} %[[LAM_RET]], %[[RETVAL]]
// CIR: }
-// CIR: cir.store{{.*}} %[[SCOPE_RET]], %[[RETVAL]]
// CIR: %[[RET:.*]] = cir.load{{.*}} %[[RETVAL]]
// CIR: cir.return %[[RET]]
@@ -355,10 +352,9 @@ struct A {
// LLVM: %[[STRUCT_A:.*]] = getelementptr %[[REC_LAM_A]], ptr %[[LAM_ALLOCA]], i32 0, i32 0
// LLVM: call void @_ZN1AC1ERKS_(ptr %[[STRUCT_A]], ptr %[[THIS]])
// LLVM: %[[LAM_RET:.*]] = call i32 @_ZZN1A3fooEvENKUlvE_clEv(ptr %[[LAM_ALLOCA]])
+// LLVM: store i32 %[[LAM_RET]], ptr %[[RETVAL]]
// LLVM: br label %[[RET_BB:.*]]
// LLVM: [[RET_BB]]:
-// LLVM: %[[RETPHI:.*]] = phi i32 [ %[[LAM_RET]], %[[SCOPE_BB]] ]
-// LLVM: store i32 %[[RETPHI]], ptr %[[RETVAL]]
// LLVM: %[[RET:.*]] = load i32, ptr %[[RETVAL]]
// LLVM: ret i32 %[[RET]]
@@ -407,14 +403,13 @@ struct A {
// CIR: %[[RETVAL:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["__retval"]
// CIR: cir.store %[[THIS_ARG]], %[[THIS_ADDR]]
// CIR: %[[THIS]] = cir.load %[[THIS_ADDR]] : !cir.ptr<!cir.ptr<!rec_A>>, !cir.ptr<!rec_A>
-// CIR: %[[SCOPE_RET:.*]] = cir.scope {
+// CIR: cir.scope {
// CIR: %[[LAM_ADDR:.*]] = cir.alloca ![[REC_LAM_PTR_A]], !cir.ptr<![[REC_LAM_PTR_A]]>, ["ref.tmp0"]
// CIR: %[[A_ADDR_ADDR:.*]] = cir.get_member %[[LAM_ADDR]][0] {name = "this"} : !cir.ptr<![[REC_LAM_PTR_A]]> -> !cir.ptr<!cir.ptr<!rec_A>>
// CIR: cir.store{{.*}} %[[THIS]], %[[A_ADDR_ADDR]]
// CIR: %[[LAM_RET:.*]] = cir.call @_ZZN1A3barEvENKUlvE_clEv(%[[LAM_ADDR]])
-// CIR: cir.yield %[[LAM_RET]]
+// CIR: cir.store{{.*}} %[[LAM_RET]], %[[RETVAL]]
// CIR: }
-// CIR: cir.store{{.*}} %[[SCOPE_RET]], %[[RETVAL]]
// CIR: %[[RET:.*]] = cir.load{{.*}} %[[RETVAL]]
// CIR: cir.return %[[RET]]
@@ -429,10 +424,9 @@ struct A {
// LLVM: %[[A_ADDR_ADDR:.*]] = getelementptr %[[REC_LAM_PTR_A]], ptr %[[LAM_ALLOCA]], i32 0, i32 0
// LLVM: store ptr %[[THIS]], ptr %[[A_ADDR_ADDR]]
// LLVM: %[[LAM_RET:.*]] = call i32 @_ZZN1A3barEvENKUlvE_clEv(ptr %[[LAM_ALLOCA]])
+// LLVM: store i32 %[[LAM_RET]], ptr %[[RETVAL]]
// LLVM: br label %[[RET_BB:.*]]
// LLVM: [[RET_BB]]:
-// LLVM: %[[RETPHI:.*]] = phi i32 [ %[[LAM_RET]], %[[SCOPE_BB]] ]
-// LLVM: store i32 %[[RETPHI]], ptr %[[RETVAL]]
// LLVM: %[[RET:.*]] = load i32, ptr %[[RETVAL]]
// LLVM: ret i32 %[[RET]]
diff --git a/clang/test/CIR/CodeGen/statement-exprs.c b/clang/test/CIR/CodeGen/statement-exprs.c
index f6ec9ecd1b67e..c784ec9eda7d8 100644
--- a/clang/test/CIR/CodeGen/statement-exprs.c
+++ b/clang/test/CIR/CodeGen/statement-exprs.c
@@ -218,7 +218,7 @@ struct S { int x; };
int test3() { return ({ struct S s = {1}; s; }).x; }
// CIR: cir.func no_proto dso_local @test3() -> !s32i
// CIR: %[[RETVAL:.+]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["__retval"]
-// CIR: %[[YIELDVAL:.+]] = cir.scope {
+// CIR: cir.scope {
// CIR: %[[REF_TMP0:.+]] = cir.alloca !rec_S, !cir.ptr<!rec_S>, ["ref.tmp0"]
// CIR: %[[TMP:.+]] = cir.alloca !rec_S, !cir.ptr<!rec_S>, ["tmp"]
// CIR: cir.scope {
@@ -230,9 +230,8 @@ int test3() { return ({ struct S s = {1}; s; }).x; }
// CIR: }
// CIR: %[[GEP_X_TMP:.+]] = cir.get_member %[[REF_TMP0]][0] {name = "x"} : !cir.ptr<!rec_S> -> !cir.ptr<!s32i>
// CIR: %[[XVAL:.+]] = cir.load {{.*}} %[[GEP_X_TMP]] : !cir.ptr<!s32i>, !s32i
-// CIR: cir.yield %[[XVAL]] : !s32i
-// CIR: } : !s32i
-// CIR: cir.store %[[YIELDVAL]], %[[RETVAL]] : !s32i, !cir.ptr<!s32i>
+// CIR: cir.store{{.*}} %[[XVAL]], %[[RETVAL]] : !s32i, !cir.ptr<!s32i>
+// CIR: }
// CIR: %[[RES:.+]] = cir.load %[[RETVAL]] : !cir.ptr<!s32i>, !s32i
// CIR: cir.return %[[RES]] : !s32i
@@ -252,10 +251,9 @@ int test3() { return ({ struct S s = {1}; s; }).x; }
// LLVM: [[LBL8]]:
// LLVM: %[[GEP_VAR1:.+]] = getelementptr %struct.S, ptr %[[VAR1]], i32 0, i32 0
// LLVM: %[[LOAD_X:.+]] = load i32, ptr %[[GEP_VAR1]]
+// LLVM: store i32 %[[LOAD_X]], ptr %[[VAR4]]
// LLVM: br label %[[LBL11:.+]]
// LLVM: [[LBL11]]:
-// LLVM: %[[PHI:.+]] = phi i32 [ %[[LOAD_X]], %[[LBL8]] ]
-// LLVM: store i32 %[[PHI]], ptr %[[VAR4]]
// LLVM: %[[RES:.+]] = load i32, ptr %[[VAR4]]
// LLVM: ret i32 %[[RES]]
diff --git a/clang/test/CIR/CodeGen/vla.c b/clang/test/CIR/CodeGen/vla.c
index e2adf457c7f69..6274ceb17573f 100644
--- a/clang/test/CIR/CodeGen/vla.c
+++ b/clang/test/CIR/CodeGen/vla.c
@@ -282,4 +282,61 @@ void f3(unsigned len) {
// break;
// }
// }
-
\ No newline at end of file
+
+int f5(unsigned long len) {
+ int arr[len];
+ return arr[2];
+}
+
+// CIR: cir.func{{.*}} @f5(%[[LEN_ARG:.*]]: !u64i {{.*}}) -> !s32i {
+// CIR: %[[LEN_ADDR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["len", init]
+// CIR: %[[RET_ADDR:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["__retval"]
+// CIR: %[[SAVED_STACK:.*]] = cir.alloca !cir.ptr<!u8i>, !cir.ptr<!cir.ptr<!u8i>>, ["saved_stack"]
+// CIR: cir.store{{.*}} %[[LEN_ARG]], %[[LEN_ADDR]]
+// CIR: %[[LEN:.*]] = cir.load{{.*}} %[[LEN_ADDR]]
+// CIR: %[[STACK_PTR:.*]] = cir.stacksave
+// CIR: cir.store{{.*}} %[[STACK_PTR]], %[[SAVED_STACK]]
+// CIR: %[[ARR:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, %[[LEN]] : !u64i, ["arr"]
+// CIR: %[[TWO:.*]] = cir.const #cir.int<2> : !s32i
+// CIR: %[[ARR_2:.*]] = cir.ptr_stride %[[ARR]], %[[TWO]]
+// CIR: %[[ARR_VAL:.*]] = cir.load{{.*}} %[[ARR_2]] : !cir.ptr<!s32i>, !s32i
+// CIR: cir.store{{.*}} %[[ARR_VAL]], %[[RET_ADDR]] : !s32i, !cir.ptr<!s32i>
+// CIR: %[[STACK_RESTORE_PTR:.*]] = cir.load{{.*}} %[[SAVED_STACK]]
+// CIR: cir.stackrestore %[[STACK_RESTORE_PTR]]
+// CIR: %[[RET_VAL:.*]] = cir.load{{.*}} %[[RET_ADDR]]
+// CIR: cir.return %[[RET_VAL]] : !s32i
+
+// LLVM: define{{.*}} i32 @f5(i64 %[[LEN_ARG:.*]]) {
+// LLVM: %[[LEN_ADDR:.*]] = alloca i64
+// LLVM: %[[RET_ADDR:.*]] = alloca i32
+// LLVM: %[[SAVED_STACK:.*]] = alloca ptr
+// LLVM: store i64 %[[LEN_ARG]], ptr %[[LEN_ADDR]]
+// LLVM: %[[LEN:.*]] = load i64, ptr %[[LEN_ADDR]]
+// LLVM: %[[STACK_PTR:.*]] = call ptr @llvm.stacksave.p0()
+// LLVM: store ptr %[[STACK_PTR]], ptr %[[SAVED_STACK]]
+// LLVM: %[[ARR:.*]] = alloca i32, i64 %[[LEN]]
+// LLVM: %[[ARR_2:.*]] = getelementptr i32, ptr %[[ARR]], i64 2
+// LLVM: %[[ARR_VAL:.*]] = load i32, ptr %[[ARR_2]]
+// LLVM: store i32 %[[ARR_VAL]], ptr %[[RET_ADDR]]
+// LLVM: %[[STACK_RESTORE_PTR:.*]] = load ptr, ptr %[[SAVED_STACK]]
+// LLVM: call void @llvm.stackrestore.p0(ptr %[[STACK_RESTORE_PTR]])
+// LLVM: %[[RET_VAL:.*]] = load i32, ptr %[[RET_ADDR]]
+// LLVM: ret i32 %[[RET_VAL]]
+
+// Note: VLA_EXPR0 below is emitted to capture debug info.
+
+// OGCG: define{{.*}} i32 @f5(i64 {{.*}} %[[LEN_ARG:.*]])
+// OGCG: %[[LEN_ADDR:.*]] = alloca i64
+// OGCG: %[[SAVED_STACK:.*]] = alloca ptr
+// OGCG: %[[VLA_EXPR0:.*]] = alloca i64
+// OGCG: store i64 %[[LEN_ARG]], ptr %[[LEN_ADDR]]
+// OGCG: %[[LEN:.*]] = load i64, ptr %[[LEN_ADDR]]
+// OGCG: %[[STACK_PTR:.*]] = call ptr @llvm.stacksave.p0()
+// OGCG: store ptr %[[STACK_PTR]], ptr %[[SAVED_STACK]]
+// OGCG: %[[ARR:.*]] = alloca i32, i64 %[[LEN]]
+// OGCG: store i64 %[[LEN]], ptr %[[VLA_EXPR0]]
+// OGCG: %[[ARR_2:.*]] = getelementptr inbounds i32, ptr %[[ARR]], i64 2
+// OGCG: %[[ARR_VAL:.*]] = load i32, ptr %[[ARR_2]]
+// OGCG: %[[STACK_RESTORE_PTR:.*]] = load ptr, ptr %[[SAVED_STACK]]
+// OGCG: call void @llvm.stackrestore.p0(ptr %[[STACK_RESTORE_PTR]])
+// OGCG: ret i32 %[[ARR_VAL]]