[clang] [CIR] Upstream a basic version of class LexicalScope (PR #131945)
David Olsen via cfe-commits
cfe-commits at lists.llvm.org
Wed Mar 19 08:51:24 PDT 2025
https://github.com/dkolsen-pgi updated https://github.com/llvm/llvm-project/pull/131945
>From ef54ceca65c8a62544651cbbd30967efc7adec36 Mon Sep 17 00:00:00 2001
From: David Olsen <dolsen at nvidia.com>
Date: Tue, 18 Mar 2025 17:37:26 -0700
Subject: [PATCH 1/2] [CIR] Upstream a basic version of class LexicalScope
Upstream the parts of class `CIRGenFunction::LexicalScope` that
implement function return values. There is a bit of other functionality
in here, such as the implicit `cir.yield` at the end of a non-function
scope, but this is mostly about function returns.
The parts of `LexicalScope` that handle calling destructors, switch
statements, ternary expressions, and exception handling still need to be
upstreamed.
There is a change in the generated ClangIR (which is why many of the
tests needed updating). Return values are stored in the
compiler-generated variable `__retval` rather than being passed to the
`cir.return` op directly. But there should be no change in the behavior
of the generated code.
---
clang/include/clang/CIR/Dialect/IR/CIROps.td | 16 ++
clang/include/clang/CIR/MissingFeatures.h | 1 +
clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 219 +++++++++++++++----
clang/lib/CIR/CodeGen/CIRGenFunction.h | 156 ++++++++++++-
clang/lib/CIR/CodeGen/CIRGenStmt.cpp | 33 ++-
clang/test/CIR/CodeGen/basic.c | 46 +++-
clang/test/CIR/CodeGen/basic.cpp | 20 +-
clang/test/CIR/CodeGen/cast.cpp | 9 +-
clang/test/CIR/CodeGen/unary.cpp | 15 ++
clang/test/CIR/Lowering/basic.cpp | 20 +-
clang/test/CIR/Lowering/func-simple.cpp | 51 +++--
clang/test/CIR/func-simple.cpp | 74 +++++--
12 files changed, 544 insertions(+), 116 deletions(-)
diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td
index 2a5caf1bf1f63..352d72ff31a8a 100644
--- a/clang/include/clang/CIR/Dialect/IR/CIROps.td
+++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td
@@ -832,6 +832,22 @@ def FuncOp : CIR_Op<"func", [
let hasVerifier = 1;
}
+//===----------------------------------------------------------------------===//
+// UnreachableOp
+//===----------------------------------------------------------------------===//
+
+def UnreachableOp : CIR_Op<"unreachable", [Terminator]> {
+ let summary = "invoke immediate undefined behavior";
+ let description = [{
+ If the program control flow reaches a `cir.unreachable` operation, the
+ program exhibits undefined behavior immediately. This operation is useful
+ in cases where the unreachability of a program point needs to be explicitly
+ marked.
+ }];
+
+ let assemblyFormat = "attr-dict";
+}
+
//===----------------------------------------------------------------------===//
// TrapOp
//===----------------------------------------------------------------------===//
diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h
index 6adff30f5c91a..e37bff1f548c9 100644
--- a/clang/include/clang/CIR/MissingFeatures.h
+++ b/clang/include/clang/CIR/MissingFeatures.h
@@ -89,6 +89,7 @@ struct MissingFeatures {
static bool astVarDeclInterface() { return false; }
static bool stackSaveOp() { return false; }
static bool aggValueSlot() { return false; }
+ static bool generateDebugInfo() { return false; }
static bool fpConstraints() { return false; }
static bool sanitizers() { return false; }
diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
index 2338ec9cd952a..5685339c9e637 100644
--- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
@@ -135,6 +135,13 @@ mlir::Location CIRGenFunction::getLoc(mlir::Location lhs, mlir::Location rhs) {
return mlir::FusedLoc::get(locs, metadata, &getMLIRContext());
}
+void CIRGenFunction::emitAndUpdateRetAlloca(QualType type, mlir::Location loc,
+ CharUnits alignment) {
+ if (!type->isVoidType()) {
+ fnRetAlloca = emitAlloca("__retval", convertType(type), loc, alignment);
+ }
+}
+
void CIRGenFunction::declare(mlir::Value addrVal, const Decl *var, QualType ty,
mlir::Location loc, CharUnits alignment,
bool isParam) {
@@ -149,6 +156,118 @@ void CIRGenFunction::declare(mlir::Value addrVal, const Decl *var, QualType ty,
allocaOp.setConstantAttr(mlir::UnitAttr::get(&getMLIRContext()));
}
+void CIRGenFunction::LexicalScope::cleanup() {
+ CIRGenBuilderTy &builder = cgf.builder;
+ LexicalScope *localScope = cgf.currLexScope;
+
+ if (returnBlock != nullptr) {
+ // Write out the return block, which loads the value from `__retval` and
+ // issues the `cir.return`.
+ mlir::OpBuilder::InsertionGuard guard(builder);
+ builder.setInsertionPointToEnd(returnBlock);
+ (void)emitReturn(*returnLoc);
+ }
+
+ mlir::Block *currBlock = builder.getBlock();
+ if (isGlobalInit() && !currBlock)
+ return;
+ if (currBlock->mightHaveTerminator() && currBlock->getTerminator())
+ return;
+
+ // Get rid of any empty block at the end of the scope.
+ bool entryBlock = builder.getInsertionBlock()->isEntryBlock();
+ if (!entryBlock && currBlock->empty()) {
+ currBlock->erase();
+ if (returnBlock != nullptr && returnBlock->getUses().empty())
+ returnBlock->erase();
+ return;
+ }
+
+ // Reached the end of the scope.
+ {
+ mlir::OpBuilder::InsertionGuard guard(builder);
+ builder.setInsertionPointToEnd(currBlock);
+
+ if (localScope->depth == 0) {
+ // Reached the end of the function.
+ if (returnBlock != nullptr) {
+ if (returnBlock->getUses().empty())
+ returnBlock->erase();
+ else {
+ builder.create<cir::BrOp>(*returnLoc, returnBlock);
+ return;
+ }
+ }
+ emitImplicitReturn();
+ return;
+ }
+ // Reached the end of a non-function scope. Some scopes, such as those
+ // used with the ?: operator, can return a value.
+ if (!localScope->isTernary() && !currBlock->mightHaveTerminator()) {
+ !retVal ? builder.create<cir::YieldOp>(localScope->endLoc)
+ : builder.create<cir::YieldOp>(localScope->endLoc, retVal);
+ }
+ }
+}
+
+cir::ReturnOp CIRGenFunction::LexicalScope::emitReturn(mlir::Location loc) {
+ CIRGenBuilderTy &builder = cgf.getBuilder();
+
+ if (!cgf.curFn.getFunctionType().hasVoidReturn()) {
+ // Load the value from `__retval` and return it via the `cir.return` op.
+ auto value = builder.create<cir::LoadOp>(
+ loc, cgf.curFn.getFunctionType().getReturnType(), *cgf.fnRetAlloca);
+ return builder.create<cir::ReturnOp>(loc,
+ llvm::ArrayRef(value.getResult()));
+ }
+ return builder.create<cir::ReturnOp>(loc);
+}
+
+// This is copied from CodeGenModule::MayDropFunctionReturn. This is a
+// candidate for sharing between CIRGen and CodeGen.
+static bool mayDropFunctionReturn(const ASTContext &astContext,
+ QualType returnType) {
+ // We can't just discard the return value for a record type with a complex
+ // destructor or a non-trivially copyable type.
+ if (const RecordType *recordType =
+ returnType.getCanonicalType()->getAs<RecordType>()) {
+ if (const auto *classDecl = dyn_cast<CXXRecordDecl>(recordType->getDecl()))
+ return classDecl->hasTrivialDestructor();
+ }
+ return returnType.isTriviallyCopyableType(astContext);
+}
+
+void CIRGenFunction::LexicalScope::emitImplicitReturn() {
+ CIRGenBuilderTy &builder = cgf.getBuilder();
+ LexicalScope *localScope = cgf.currLexScope;
+
+ const auto *fd = cast<clang::FunctionDecl>(cgf.curGD.getDecl());
+
+ // In C++, flowing off the end of a non-void function is always undefined
+ // behavior. In C, flowing off the end of a non-void function is undefined
+ // behavior only if the non-existent return value is used by the caller.
+ // That influences whether the terminating op is trap, unreachable, or
+ // return.
+ if (cgf.getLangOpts().CPlusPlus && !fd->hasImplicitReturnZero() &&
+ !cgf.sawAsmBlock && !fd->getReturnType()->isVoidType() &&
+ builder.getInsertionBlock()) {
+ bool shouldEmitUnreachable =
+ cgf.cgm.getCodeGenOpts().StrictReturn ||
+ !mayDropFunctionReturn(fd->getASTContext(), fd->getReturnType());
+
+ if (shouldEmitUnreachable) {
+ if (cgf.cgm.getCodeGenOpts().OptimizationLevel == 0)
+ builder.create<cir::TrapOp>(localScope->endLoc);
+ else
+ builder.create<cir::UnreachableOp>(localScope->endLoc);
+ builder.clearInsertionPoint();
+ return;
+ }
+ }
+
+ (void)emitReturn(localScope->endLoc);
+}
+
void CIRGenFunction::startFunction(GlobalDecl gd, QualType returnType,
cir::FuncOp fn, cir::FuncType funcType,
FunctionArgList args, SourceLocation loc,
@@ -156,7 +275,6 @@ void CIRGenFunction::startFunction(GlobalDecl gd, QualType returnType,
assert(!curFn &&
"CIRGenFunction can only be used for one function at a time");
- fnRetTy = returnType;
curFn = fn;
const auto *fd = dyn_cast_or_null<FunctionDecl>(gd.getDecl());
@@ -194,6 +312,12 @@ void CIRGenFunction::startFunction(GlobalDecl gd, QualType returnType,
builder.CIRBaseBuilderTy::createStore(fnBodyBegin, paramVal, addrVal);
}
assert(builder.getInsertionBlock() && "Should be valid");
+
+ // When the current function is not void, create an address to store the
+ // result value.
+ if (!returnType->isVoidType())
+ emitAndUpdateRetAlloca(returnType, getLoc(fd->getBody()->getEndLoc()),
+ getContext().getTypeAlignInChars(returnType));
}
void CIRGenFunction::finishFunction(SourceLocation endLoc) {}
@@ -208,9 +332,24 @@ mlir::LogicalResult CIRGenFunction::emitFunctionBody(const clang::Stmt *body) {
return result;
}
+static void eraseEmptyAndUnusedBlocks(cir::FuncOp func) {
+ // Remove any leftover blocks that are unreachable and empty, since they do
+ // not represent unreachable code useful for warnings nor anything deemed
+ // useful in general.
+ SmallVector<mlir::Block *> blocksToDelete;
+ for (mlir::Block &block : func.getBlocks()) {
+ if (block.empty() && block.getUses().empty())
+ blocksToDelete.push_back(&block);
+ }
+ for (mlir::Block *block : blocksToDelete)
+ block->erase();
+}
+
cir::FuncOp CIRGenFunction::generateCode(clang::GlobalDecl gd, cir::FuncOp fn,
cir::FuncType funcType) {
const auto funcDecl = cast<FunctionDecl>(gd.getDecl());
+ curGD = gd;
+
SourceLocation loc = funcDecl->getLocation();
Stmt *body = funcDecl->getBody();
SourceRange bodyRange =
@@ -219,55 +358,53 @@ cir::FuncOp CIRGenFunction::generateCode(clang::GlobalDecl gd, cir::FuncOp fn,
SourceLocRAIIObject fnLoc{*this, loc.isValid() ? getLoc(loc)
: builder.getUnknownLoc()};
- // This will be used once more code is upstreamed.
- [[maybe_unused]] mlir::Block *entryBB = fn.addEntryBlock();
+ auto validMLIRLoc = [&](clang::SourceLocation clangLoc) {
+ return clangLoc.isValid() ? getLoc(clangLoc) : builder.getUnknownLoc();
+ };
+ const mlir::Location fusedLoc = mlir::FusedLoc::get(
+ &getMLIRContext(),
+ {validMLIRLoc(bodyRange.getBegin()), validMLIRLoc(bodyRange.getEnd())});
+ mlir::Block *entryBB = fn.addEntryBlock();
FunctionArgList args;
QualType retTy = buildFunctionArgList(gd, args);
- startFunction(gd, retTy, fn, funcType, args, loc, bodyRange.getBegin());
-
- if (isa<CXXDestructorDecl>(funcDecl))
- getCIRGenModule().errorNYI(bodyRange, "C++ destructor definition");
- else if (isa<CXXConstructorDecl>(funcDecl))
- getCIRGenModule().errorNYI(bodyRange, "C++ constructor definition");
- else if (getLangOpts().CUDA && !getLangOpts().CUDAIsDevice &&
- funcDecl->hasAttr<CUDAGlobalAttr>())
- getCIRGenModule().errorNYI(bodyRange, "CUDA kernel");
- else if (isa<CXXMethodDecl>(funcDecl) &&
- cast<CXXMethodDecl>(funcDecl)->isLambdaStaticInvoker())
- getCIRGenModule().errorNYI(bodyRange, "Lambda static invoker");
- else if (funcDecl->isDefaulted() && isa<CXXMethodDecl>(funcDecl) &&
- (cast<CXXMethodDecl>(funcDecl)->isCopyAssignmentOperator() ||
- cast<CXXMethodDecl>(funcDecl)->isMoveAssignmentOperator()))
- getCIRGenModule().errorNYI(bodyRange, "Default assignment operator");
- else if (body) {
- if (mlir::failed(emitFunctionBody(body))) {
- fn.erase();
- return nullptr;
- }
- } else
- llvm_unreachable("no definition for normal function");
-
- // This code to insert a cir.return or cir.trap at the end of the function is
- // temporary until the function return code, including
- // CIRGenFunction::LexicalScope::emitImplicitReturn(), is upstreamed.
- mlir::Block &lastBlock = fn.getRegion().back();
- if (lastBlock.empty() || !lastBlock.mightHaveTerminator() ||
- !lastBlock.getTerminator()->hasTrait<mlir::OpTrait::IsTerminator>()) {
- builder.setInsertionPointToEnd(&lastBlock);
- if (mlir::isa<cir::VoidType>(funcType.getReturnType())) {
- builder.create<cir::ReturnOp>(getLoc(bodyRange.getEnd()));
+ {
+ LexicalScope lexScope(*this, fusedLoc, entryBB);
+
+ startFunction(gd, retTy, fn, funcType, args, loc, bodyRange.getBegin());
+
+ if (isa<CXXDestructorDecl>(funcDecl))
+ getCIRGenModule().errorNYI(bodyRange, "C++ destructor definition");
+ else if (isa<CXXConstructorDecl>(funcDecl))
+ getCIRGenModule().errorNYI(bodyRange, "C++ constructor definition");
+ else if (getLangOpts().CUDA && !getLangOpts().CUDAIsDevice &&
+ funcDecl->hasAttr<CUDAGlobalAttr>())
+ getCIRGenModule().errorNYI(bodyRange, "CUDA kernel");
+ else if (isa<CXXMethodDecl>(funcDecl) &&
+ cast<CXXMethodDecl>(funcDecl)->isLambdaStaticInvoker())
+ getCIRGenModule().errorNYI(bodyRange, "Lambda static invoker");
+ else if (funcDecl->isDefaulted() && isa<CXXMethodDecl>(funcDecl) &&
+ (cast<CXXMethodDecl>(funcDecl)->isCopyAssignmentOperator() ||
+ cast<CXXMethodDecl>(funcDecl)->isMoveAssignmentOperator()))
+ getCIRGenModule().errorNYI(bodyRange, "Default assignment operator");
+ else if (body) {
+ if (mlir::failed(emitFunctionBody(body))) {
+ fn.erase();
+ return nullptr;
+ }
} else {
- builder.create<cir::TrapOp>(getLoc(bodyRange.getEnd()));
+ // Anything without a body should have been handled above.
+ llvm_unreachable("no definition for normal function");
}
- }
- if (mlir::failed(fn.verifyBody()))
- return nullptr;
+ if (mlir::failed(fn.verifyBody()))
+ return nullptr;
- finishFunction(bodyRange.getEnd());
+ finishFunction(bodyRange.getEnd());
+ }
+ eraseEmptyAndUnusedBlocks(fn);
return fn;
}
diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h
index f931da32d51db..b52f5ec734f70 100644
--- a/clang/lib/CIR/CodeGen/CIRGenFunction.h
+++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h
@@ -49,11 +49,15 @@ class CIRGenFunction : public CIRGenTypeCache {
CIRGenBuilderTy &builder;
public:
- clang::QualType fnRetTy;
+ /// The GlobalDecl for the current function being compiled or the global
+ /// variable currently being initialized.
+ clang::GlobalDecl curGD;
- /// This is the current function or global initializer that is generated code
- /// for.
- mlir::Operation *curFn = nullptr;
+ /// The compiler-generated variable that holds the return value.
+ std::optional<mlir::Value> fnRetAlloca;
+
+ /// The function for which code is currently being generated.
+ cir::FuncOp curFn;
using DeclMapTy = llvm::DenseMap<const clang::Decl *, Address>;
/// This keeps track of the CIR allocas or globals for local C
@@ -67,15 +71,15 @@ class CIRGenFunction : public CIRGenTypeCache {
CIRGenModule &getCIRGenModule() { return cgm; }
const CIRGenModule &getCIRGenModule() const { return cgm; }
- mlir::Block *getCurFunctionEntryBlock() {
- auto fn = mlir::dyn_cast<cir::FuncOp>(curFn);
- assert(fn && "other callables NYI");
- return &fn.getRegion().front();
- }
+ mlir::Block *getCurFunctionEntryBlock() { return &curFn.getRegion().front(); }
/// Sanitizers enabled for this function.
clang::SanitizerSet sanOpts;
+ /// Whether or not a Microsoft-style asm block has been processed within
+ /// this function. These can potentially set the return value.
+ bool sawAsmBlock = false;
+
mlir::Type convertTypeForMem(QualType T);
mlir::Type convertType(clang::QualType T);
@@ -131,6 +135,9 @@ class CIRGenFunction : public CIRGenTypeCache {
~VarDeclContext() { restore(); }
};
+ void emitAndUpdateRetAlloca(clang::QualType type, mlir::Location loc,
+ clang::CharUnits alignment);
+
public:
/// Use to track source locations across nested visitor traversals.
/// Always use a `SourceLocRAIIObject` to change currSrcLoc.
@@ -330,9 +337,140 @@ class CIRGenFunction : public CIRGenTypeCache {
FunctionArgList args, clang::SourceLocation loc,
clang::SourceLocation startLoc);
+ /// Represents a scope, including function bodies, compound statements, and
+ /// the substatements of if/while/do/for/switch/try statements. This class
+ /// handles any automatic cleanup, along with the return value.
+ struct LexicalScope {
+ private:
+ // TODO(CIR): This will live in the base class RunCleanupScope once that
+ // class is upstreamed.
+ CIRGenFunction &cgf;
+
+ // Block containing cleanup code for things initialized in this lexical
+ // context (scope).
+ mlir::Block *cleanupBlock = nullptr;
+
+ // Points to the scope entry block. This is useful, for instance, for
+ // helping to insert allocas before finalizing any recursive CodeGen from
+ // switches.
+ mlir::Block *entryBlock;
+
+ LexicalScope *parentScope = nullptr;
+
+ // Only Regular is used at the moment. Support for other kinds will be
+ // added as the relevant statements/expressions are upstreamed.
+ enum Kind {
+ Regular, // cir.if, cir.scope, if_regions
+ Ternary, // cir.ternary
+ Switch, // cir.switch
+ Try, // cir.try
+ GlobalInit // cir.global initialization code
+ };
+ Kind scopeKind = Kind::Regular;
+
+ // The scope return value.
+ mlir::Value retVal = nullptr;
+
+ mlir::Location beginLoc;
+ mlir::Location endLoc;
+
+ public:
+ unsigned depth = 0;
+
+ LexicalScope(CIRGenFunction &cgf, mlir::Location loc, mlir::Block *eb)
+ : cgf(cgf), entryBlock(eb), parentScope(cgf.currLexScope),
+ beginLoc(loc), endLoc(loc) {
+
+ assert(entryBlock && "LexicalScope requires an entry block");
+ cgf.currLexScope = this;
+ if (parentScope)
+ ++depth;
+
+ if (const auto fusedLoc = mlir::dyn_cast<mlir::FusedLoc>(loc)) {
+ assert(fusedLoc.getLocations().size() == 2 && "too many locations");
+ beginLoc = fusedLoc.getLocations()[0];
+ endLoc = fusedLoc.getLocations()[1];
+ }
+ }
+
+ void setRetVal(mlir::Value v) { retVal = v; }
+
+ void cleanup();
+ void restore() { cgf.currLexScope = parentScope; }
+
+ ~LexicalScope() {
+ assert(!cir::MissingFeatures::generateDebugInfo());
+ cleanup();
+ restore();
+ }
+
+ // ---
+ // Kind
+ // ---
+ bool isGlobalInit() { return scopeKind == Kind::GlobalInit; }
+ bool isRegular() { return scopeKind == Kind::Regular; }
+ bool isSwitch() { return scopeKind == Kind::Switch; }
+ bool isTernary() { return scopeKind == Kind::Ternary; }
+ bool isTry() { return scopeKind == Kind::Try; }
+
+ void setAsGlobalInit() { scopeKind = Kind::GlobalInit; }
+ void setAsSwitch() { scopeKind = Kind::Switch; }
+ void setAsTernary() { scopeKind = Kind::Ternary; }
+
+ // ---
+ // Return handling.
+ // ---
+
+ private:
+ // `returnBlock`, `returnLoc`, and all the functions that deal with them
+ // will change and become more complicated when `switch` statements are
+ // upstreamed. `case` statements within the `switch` are in the same scope
+ // but have their own regions. Therefore the LexicalScope will need to
+ // keep track of multiple return blocks.
+ mlir::Block *returnBlock = nullptr;
+ std::optional<mlir::Location> returnLoc;
+
+ // See the comment on `getOrCreateRetBlock`.
+ mlir::Block *createRetBlock(CIRGenFunction &cgf, mlir::Location loc) {
+ assert(returnBlock == nullptr && "only one return block per scope");
+ // Create the return block but don't hook it up just yet.
+ mlir::OpBuilder::InsertionGuard guard(cgf.builder);
+ returnBlock =
+ cgf.builder.createBlock(cgf.builder.getBlock()->getParent());
+ updateRetLoc(returnBlock, loc);
+ return returnBlock;
+ }
+
+ cir::ReturnOp emitReturn(mlir::Location loc);
+ void emitImplicitReturn();
+
+ public:
+ mlir::Block *getRetBlock() { return returnBlock; }
+ mlir::Location getRetLoc(mlir::Block *b) { return *returnLoc; }
+ void updateRetLoc(mlir::Block *b, mlir::Location loc) { returnLoc = loc; }
+
+ // Create the return block for this scope, or return the existing one.
+ // This get-or-create logic is necessary to handle multiple return
+ // statements within the same scope, which can happen if some of them are
+ // dead code or if there is a `goto` into the middle of the scope.
+ mlir::Block *getOrCreateRetBlock(CIRGenFunction &cgf, mlir::Location loc) {
+ if (returnBlock == nullptr) {
+ returnBlock = createRetBlock(cgf, loc);
+ return returnBlock;
+ }
+ updateRetLoc(returnBlock, loc);
+ return returnBlock;
+ }
+
+ mlir::Block *getEntryBlock() { return entryBlock; }
+ };
+
+ LexicalScope *currLexScope = nullptr;
+
Address createTempAlloca(mlir::Type ty, CharUnits align, mlir::Location loc,
const Twine &name = "tmp");
};
+
} // namespace clang::CIRGen
#endif
diff --git a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp
index 7a38f9838b290..d4bc2db24d95c 100644
--- a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp
@@ -30,20 +30,16 @@ void CIRGenFunction::emitCompoundStmtWithoutScope(const CompoundStmt &s) {
void CIRGenFunction::emitCompoundStmt(const CompoundStmt &s) {
mlir::Location scopeLoc = getLoc(s.getSourceRange());
- auto scope = builder.create<cir::ScopeOp>(
+ mlir::OpBuilder::InsertPoint scopeInsPt;
+ builder.create<cir::ScopeOp>(
scopeLoc, [&](mlir::OpBuilder &b, mlir::Type &type, mlir::Location loc) {
- emitCompoundStmtWithoutScope(s);
+ scopeInsPt = b.saveInsertionPoint();
});
-
- // This code to insert a cir.yield at the end of the scope is temporary until
- // CIRGenFunction::LexicalScope::cleanup() is upstreamed.
- if (!scope.getRegion().empty()) {
- mlir::Block &lastBlock = scope.getRegion().back();
- if (lastBlock.empty() || !lastBlock.mightHaveTerminator() ||
- !lastBlock.getTerminator()->hasTrait<mlir::OpTrait::IsTerminator>()) {
- builder.setInsertionPointToEnd(&lastBlock);
- builder.create<cir::YieldOp>(getLoc(s.getEndLoc()));
- }
+ {
+ mlir::OpBuilder::InsertionGuard guard(builder);
+ builder.restoreInsertionPoint(scopeInsPt);
+ LexicalScope lexScope(*this, scopeLoc, builder.getInsertionBlock());
+ emitCompoundStmtWithoutScope(s);
}
}
@@ -251,16 +247,15 @@ mlir::LogicalResult CIRGenFunction::emitReturnStmt(const ReturnStmt &s) {
"named return value optimization");
} else if (!rv) {
// No return expression. Do nothing.
- // TODO(CIR): In the future when function returns are fully implemented,
- // this section will do nothing. But for now a ReturnOp is necessary.
- builder.create<ReturnOp>(loc);
} else if (rv->getType()->isVoidType()) {
// Make sure not to return anything, but evaluate the expression
// for side effects.
if (rv) {
emitAnyExpr(rv);
}
- } else if (fnRetTy->isReferenceType()) {
+ } else if (cast<FunctionDecl>(curGD.getDecl())
+ ->getReturnType()
+ ->isReferenceType()) {
getCIRGenModule().errorNYI(s.getSourceRange(),
"function return type that is a reference");
} else {
@@ -269,7 +264,7 @@ mlir::LogicalResult CIRGenFunction::emitReturnStmt(const ReturnStmt &s) {
case cir::TEK_Scalar:
value = emitScalarExpr(rv);
if (value) { // Change this to an assert once emitScalarExpr is complete
- builder.create<ReturnOp>(loc, llvm::ArrayRef(value));
+ builder.CIRBaseBuilderTy::createStore(loc, value, *fnRetAlloca);
}
break;
default:
@@ -279,5 +274,9 @@ mlir::LogicalResult CIRGenFunction::emitReturnStmt(const ReturnStmt &s) {
}
}
+ auto *retBlock = currLexScope->getOrCreateRetBlock(*this, loc);
+ builder.create<cir::BrOp>(loc, retBlock);
+ builder.createBlock(builder.getBlock()->getParent());
+
return mlir::success();
}
diff --git a/clang/test/CIR/CodeGen/basic.c b/clang/test/CIR/CodeGen/basic.c
index 754f11f1361ba..01e58398a6438 100644
--- a/clang/test/CIR/CodeGen/basic.c
+++ b/clang/test/CIR/CodeGen/basic.c
@@ -15,17 +15,27 @@ int f1(int i) {
// CIR: module
// CIR-NEXT: cir.func @f1(%arg0: !cir.int<s, 32> loc({{.*}})) -> !cir.int<s, 32>
// CIR-NEXT: %[[I_PTR:.*]] = cir.alloca !cir.int<s, 32>, !cir.ptr<!cir.int<s, 32>>, ["i", init] {alignment = 4 : i64}
+// CIR-NEXT: %[[RV:.*]] = cir.alloca !cir.int<s, 32>, !cir.ptr<!cir.int<s, 32>>, ["__retval"] {alignment = 4 : i64}
// CIR-NEXT: cir.store %arg0, %[[I_PTR]] : !cir.int<s, 32>, !cir.ptr<!cir.int<s, 32>>
// CIR-NEXT: %[[I_IGNORED:.*]] = cir.load %[[I_PTR]] : !cir.ptr<!cir.int<s, 32>>, !cir.int<s, 32>
// CIR-NEXT: %[[I:.*]] = cir.load %[[I_PTR]] : !cir.ptr<!cir.int<s, 32>>, !cir.int<s, 32>
-// CIR-NEXT: cir.return %[[I]] : !cir.int<s, 32>
+// CIR-NEXT: cir.store %[[I]], %[[RV]] : !cir.int<s, 32>, !cir.ptr<!cir.int<s, 32>>
+// CIR-NEXT: cir.br ^[[BB1:[^ ]+]]
+// CIR-NEXT: ^[[BB1]]:
+// CIR-NEXT: %[[R:.*]] = cir.load %[[RV]] : !cir.ptr<!cir.int<s, 32>>, !cir.int<s, 32>
+// CIR-NEXT: cir.return %[[R]] : !cir.int<s, 32>
-// LLVM: define i32 @f1(i32 %[[I:.*]])
+// LLVM: define i32 @f1(i32 %[[IP:.*]])
// LLVM-NEXT: %[[I_PTR:.*]] = alloca i32, i64 1, align 4
-// LLVM-NEXT: store i32 %[[I]], ptr %[[I_PTR]], align 4
+// LLVM-NEXT: %[[RV:.*]] = alloca i32, i64 1, align 4
+// LLVM-NEXT: store i32 %[[IP]], ptr %[[I_PTR]], align 4
// LLVM-NEXT: %[[I_IGNORED:.*]] = load i32, ptr %[[I_PTR]], align 4
// LLVM-NEXT: %[[I:.*]] = load i32, ptr %[[I_PTR]], align 4
-// LLVM-NEXT: ret i32 %[[I]]
+// LLVM-NEXT: store i32 %[[I]], ptr %[[RV]], align 4
+// LLVM-NEXT: br label %[[BB1:.*]]
+// LLVM: [[BB1]]:
+// LLVM-NEXT: %[[R:.*]] = load i32, ptr %[[RV]], align 4
+// LLVM-NEXT: ret i32 %[[R]]
// OGCG: define{{.*}} i32 @f1(i32 noundef %[[I:.*]])
// OGCG-NEXT: entry:
@@ -38,11 +48,21 @@ int f1(int i) {
int f2(void) { return 3; }
// CIR: cir.func @f2() -> !cir.int<s, 32>
+// CIR-NEXT: %[[RV:.*]] = cir.alloca !cir.int<s, 32>, !cir.ptr<!cir.int<s, 32>>, ["__retval"] {alignment = 4 : i64}
// CIR-NEXT: %[[THREE:.*]] = cir.const #cir.int<3> : !cir.int<s, 32>
-// CIR-NEXT: cir.return %[[THREE]] : !cir.int<s, 32>
+// CIR-NEXT: cir.store %[[THREE]], %[[RV]] : !cir.int<s, 32>, !cir.ptr<!cir.int<s, 32>>
+// CIR-NEXT: cir.br ^[[BB1:[^ ]+]]
+// CIR-NEXT: ^[[BB1]]:
+// CIR-NEXT: %[[R:.*]] = cir.load %0 : !cir.ptr<!cir.int<s, 32>>, !cir.int<s, 32>
+// CIR-NEXT: cir.return %[[R]] : !cir.int<s, 32>
// LLVM: define i32 @f2()
-// LLVM-NEXT: ret i32 3
+// LLVM-NEXT: %[[RV:.*]] = alloca i32, i64 1, align 4
+// LLVM-NEXT: store i32 3, ptr %[[RV]], align 4
+// LLVM-NEXT: br label %[[BB1:.*]]
+// LLVM: [[BB1]]:
+// LLVM-NEXT: %[[R:.*]] = load i32, ptr %[[RV]], align 4
+// LLVM-NEXT: ret i32 %[[R]]
// OGCG: define{{.*}} i32 @f2()
// OGCG-NEXT: entry:
@@ -54,17 +74,27 @@ int f3(void) {
}
// CIR: cir.func @f3() -> !cir.int<s, 32>
+// CIR-NEXT: %[[RV:.*]] = cir.alloca !cir.int<s, 32>, !cir.ptr<!cir.int<s, 32>>, ["__retval"] {alignment = 4 : i64}
// CIR-NEXT: %[[I_PTR:.*]] = cir.alloca !cir.int<s, 32>, !cir.ptr<!cir.int<s, 32>>, ["i", init] {alignment = 4 : i64}
// CIR-NEXT: %[[THREE:.*]] = cir.const #cir.int<3> : !cir.int<s, 32>
// CIR-NEXT: cir.store %[[THREE]], %[[I_PTR]] : !cir.int<s, 32>, !cir.ptr<!cir.int<s, 32>>
// CIR-NEXT: %[[I:.*]] = cir.load %[[I_PTR]] : !cir.ptr<!cir.int<s, 32>>, !cir.int<s, 32>
-// CIR-NEXT: cir.return %[[I]] : !cir.int<s, 32>
+// CIR-NEXT: cir.store %[[I]], %[[RV]] : !cir.int<s, 32>, !cir.ptr<!cir.int<s, 32>>
+// CIR-NEXT: cir.br ^[[BB1:[^ ]+]]
+// CIR-NEXT: ^[[BB1]]:
+// CIR-NEXT: %[[R:.*]] = cir.load %[[RV]] : !cir.ptr<!cir.int<s, 32>>, !cir.int<s, 32>
+// CIR-NEXT: cir.return %[[R]] : !cir.int<s, 32>
// LLVM: define i32 @f3()
+// LLVM-NEXT: %[[RV:.*]] = alloca i32, i64 1, align 4
// LLVM-NEXT: %[[I_PTR:.*]] = alloca i32, i64 1, align 4
// LLVM-NEXT: store i32 3, ptr %[[I_PTR]], align 4
// LLVM-NEXT: %[[I:.*]] = load i32, ptr %[[I_PTR]], align 4
-// LLVM-NEXT: ret i32 %[[I]]
+// LLVM-NEXT: store i32 %[[I]], ptr %[[RV]], align 4
+// LLVM-NEXT: br label %[[BB1:.*]]
+// LLVM: [[BB1]]:
+// LLVM-NEXT: %[[R:.*]] = load i32, ptr %[[RV]], align 4
+// LLVM-NEXT: ret i32 %[[R]]
// OGCG: define{{.*}} i32 @f3
// OGCG-NEXT: entry:
diff --git a/clang/test/CIR/CodeGen/basic.cpp b/clang/test/CIR/CodeGen/basic.cpp
index ef922cc2b46fc..04687c340843a 100644
--- a/clang/test/CIR/CodeGen/basic.cpp
+++ b/clang/test/CIR/CodeGen/basic.cpp
@@ -7,9 +7,12 @@ int f1() {
// CHECK: module
// CHECK: cir.func @f1() -> !cir.int<s, 32>
+// CHECK: %[[RV:.*]] = cir.alloca !cir.int<s, 32>, !cir.ptr<!cir.int<s, 32>>, ["__retval"] {alignment = 4 : i64}
// CHECK: %[[I_PTR:.*]] = cir.alloca !cir.int<s, 32>, !cir.ptr<!cir.int<s, 32>>, ["i"] {alignment = 4 : i64}
// CHECK: %[[I:.*]] = cir.load %[[I_PTR]] : !cir.ptr<!cir.int<s, 32>>, !cir.int<s, 32>
-// CHECK: cir.return %[[I]] : !cir.int<s, 32>
+// CHECK: cir.store %[[I]], %[[RV]] : !cir.int<s, 32>, !cir.ptr<!cir.int<s, 32>>
+// CHECK: %[[R:.*]] = cir.load %[[RV]] : !cir.ptr<!cir.int<s, 32>>, !cir.int<s, 32>
+// CHECK: cir.return %[[R]] : !cir.int<s, 32>
int f2() {
const int i = 2;
@@ -17,11 +20,14 @@ int f2() {
}
// CHECK: cir.func @f2() -> !cir.int<s, 32>
+// CHECK: %[[RV:.*]] = cir.alloca !cir.int<s, 32>, !cir.ptr<!cir.int<s, 32>>, ["__retval"] {alignment = 4 : i64}
// CHECK: %[[I_PTR:.*]] = cir.alloca !cir.int<s, 32>, !cir.ptr<!cir.int<s, 32>>, ["i", init, const] {alignment = 4 : i64}
// CHECK: %[[TWO:.*]] = cir.const #cir.int<2> : !cir.int<s, 32>
// CHECK: cir.store %[[TWO]], %[[I_PTR]] : !cir.int<s, 32>, !cir.ptr<!cir.int<s, 32>>
// CHECK: %[[I:.*]] = cir.load %[[I_PTR]] : !cir.ptr<!cir.int<s, 32>>, !cir.int<s, 32>
-// CHECK: cir.return %[[I]] : !cir.int<s, 32>
+// CHECK: cir.store %[[I]], %[[RV]] : !cir.int<s, 32>, !cir.ptr<!cir.int<s, 32>>
+// CHECK: %[[R:.*]] = cir.load %[[RV]] : !cir.ptr<!cir.int<s, 32>>, !cir.int<s, 32>
+// CHECK: cir.return %[[R]] : !cir.int<s, 32>
int f3(int i) {
return i;
@@ -29,9 +35,12 @@ int f3(int i) {
// CHECK: cir.func @f3(%[[ARG:.*]]: !cir.int<s, 32> loc({{.*}})) -> !cir.int<s, 32>
// CHECK: %[[ARG_ALLOCA:.*]] = cir.alloca !cir.int<s, 32>, !cir.ptr<!cir.int<s, 32>>, ["i", init] {alignment = 4 : i64}
+// CHECK: %[[RV:.*]] = cir.alloca !cir.int<s, 32>, !cir.ptr<!cir.int<s, 32>>, ["__retval"] {alignment = 4 : i64}
// CHECK: cir.store %[[ARG]], %[[ARG_ALLOCA]] : !cir.int<s, 32>, !cir.ptr<!cir.int<s, 32>>
// CHECK: %[[ARG_VAL:.*]] = cir.load %[[ARG_ALLOCA]] : !cir.ptr<!cir.int<s, 32>>, !cir.int<s, 32>
-// CHECK: cir.return %[[ARG_VAL]] : !cir.int<s, 32>
+// CHECK: cir.store %[[ARG_VAL]], %[[RV]] : !cir.int<s, 32>, !cir.ptr<!cir.int<s, 32>>
+// CHECK: %[[R:.*]] = cir.load %[[RV]] : !cir.ptr<!cir.int<s, 32>>, !cir.int<s, 32>
+// CHECK: cir.return %[[R]] : !cir.int<s, 32>
int f4(const int i) {
return i;
@@ -39,6 +48,9 @@ int f4(const int i) {
// CHECK: cir.func @f4(%[[ARG:.*]]: !cir.int<s, 32> loc({{.*}})) -> !cir.int<s, 32>
// CHECK: %[[ARG_ALLOCA:.*]] = cir.alloca !cir.int<s, 32>, !cir.ptr<!cir.int<s, 32>>, ["i", init, const] {alignment = 4 : i64}
+// CHECK: %[[RV:.*]] = cir.alloca !cir.int<s, 32>, !cir.ptr<!cir.int<s, 32>>, ["__retval"] {alignment = 4 : i64}
// CHECK: cir.store %[[ARG]], %[[ARG_ALLOCA]] : !cir.int<s, 32>, !cir.ptr<!cir.int<s, 32>>
// CHECK: %[[ARG_VAL:.*]] = cir.load %[[ARG_ALLOCA]] : !cir.ptr<!cir.int<s, 32>>, !cir.int<s, 32>
-// CHECK: cir.return %[[ARG_VAL]] : !cir.int<s, 32>
+// CHECK: cir.store %[[ARG_VAL]], %[[RV]] : !cir.int<s, 32>, !cir.ptr<!cir.int<s, 32>>
+// CHECK: %[[R:.*]] = cir.load %[[RV]] : !cir.ptr<!cir.int<s, 32>>, !cir.int<s, 32>
+// CHECK: cir.return %[[R]] : !cir.int<s, 32>
diff --git a/clang/test/CIR/CodeGen/cast.cpp b/clang/test/CIR/CodeGen/cast.cpp
index b25a0cdb4b055..4d9e364b3c186 100644
--- a/clang/test/CIR/CodeGen/cast.cpp
+++ b/clang/test/CIR/CodeGen/cast.cpp
@@ -9,16 +9,21 @@ unsigned char cxxstaticcast_0(unsigned int x) {
// CIR: cir.func @cxxstaticcast_0
// CIR: %[[XPTR:[0-9]+]] = cir.alloca !cir.int<u, 32>, !cir.ptr<!cir.int<u, 32>>, ["x", init] {alignment = 4 : i64}
+// CIR: %[[RV:[0-9]+]] = cir.alloca !cir.int<u, 8>, !cir.ptr<!cir.int<u, 8>>, ["__retval"] {alignment = 1 : i64}
// CIR: cir.store %arg0, %[[XPTR]] : !cir.int<u, 32>, !cir.ptr<!cir.int<u, 32>>
// CIR: %[[XVAL:[0-9]+]] = cir.load %[[XPTR]] : !cir.ptr<!cir.int<u, 32>>, !cir.int<u, 32>
// CIR: %[[CASTED:[0-9]+]] = cir.cast(integral, %[[XVAL]] : !cir.int<u, 32>), !cir.int<u, 8>
-// CIR: cir.return %[[CASTED]] : !cir.int<u, 8>
+// CIR: cir.store %[[CASTED]], %[[RV]] : !cir.int<u, 8>, !cir.ptr<!cir.int<u, 8>>
+// CIR: %[[R:[0-9]+]] = cir.load %1 : !cir.ptr<!cir.int<u, 8>>, !cir.int<u, 8>
+// CIR: cir.return %[[R]] : !cir.int<u, 8>
// CIR: }
// LLVM: define i8 @cxxstaticcast_0(i32 %{{[0-9]+}})
// LLVM: %[[LOAD:[0-9]+]] = load i32, ptr %{{[0-9]+}}, align 4
// LLVM: %[[TRUNC:[0-9]+]] = trunc i32 %[[LOAD]] to i8
-// LLVM: ret i8 %[[TRUNC]]
+// LLVM: store i8 %[[TRUNC]], ptr %[[RV:[0-9]+]], align 1
+// LLVM: %[[R:[0-9]+]] = load i8, ptr %[[RV]], align 1
+// LLVM: ret i8 %[[R]]
int cStyleCasts_0(unsigned x1, int x2, float x3, short x4, double x5) {
diff --git a/clang/test/CIR/CodeGen/unary.cpp b/clang/test/CIR/CodeGen/unary.cpp
index 5d93587463562..924f4188199ab 100644
--- a/clang/test/CIR/CodeGen/unary.cpp
+++ b/clang/test/CIR/CodeGen/unary.cpp
@@ -16,6 +16,7 @@ unsigned up0() {
// CHECK: %[[OUTPUT:.*]] = cir.unary(plus, %[[INPUT]])
// LLVM: define i32 @up0()
+// LLVM: %[[RV:.*]] = alloca i32, i64 1, align 4
// LLVM: %[[A:.*]] = alloca i32, i64 1, align 4
// LLVM: store i32 1, ptr %[[A]], align 4
// LLVM: %[[A_LOAD:.*]] = load i32, ptr %[[A]], align 4
@@ -36,6 +37,7 @@ unsigned um0() {
// CHECK: %[[OUTPUT:.*]] = cir.unary(minus, %[[INPUT]])
// LLVM: define i32 @um0()
+// LLVM: %[[RV:.*]] = alloca i32, i64 1, align 4
// LLVM: %[[A:.*]] = alloca i32, i64 1, align 4
// LLVM: store i32 1, ptr %[[A]], align 4
// LLVM: %[[A_LOAD:.*]] = load i32, ptr %[[A]], align 4
@@ -58,6 +60,7 @@ unsigned un0() {
// CHECK: %[[OUTPUT:.*]] = cir.unary(not, %[[INPUT]])
// LLVM: define i32 @un0()
+// LLVM: %[[RV:.*]] = alloca i32, i64 1, align 4
// LLVM: %[[A:.*]] = alloca i32, i64 1, align 4
// LLVM: store i32 1, ptr %[[A]], align 4
// LLVM: %[[A_LOAD:.*]] = load i32, ptr %[[A]], align 4
@@ -85,6 +88,7 @@ int inc0() {
// CHECK: %[[A_TO_OUTPUT:.*]] = cir.load %[[A]]
// LLVM: define i32 @inc0()
+// LLVM: %[[RV:.*]] = alloca i32, i64 1, align 4
// LLVM: %[[A:.*]] = alloca i32, i64 1, align 4
// LLVM: store i32 1, ptr %[[A]], align 4
// LLVM: %[[A_LOAD:.*]] = load i32, ptr %[[A]], align 4
@@ -112,6 +116,7 @@ int dec0() {
// CHECK: %[[A_TO_OUTPUT:.*]] = cir.load %[[A]]
// LLVM: define i32 @dec0()
+// LLVM: %[[RV:.*]] = alloca i32, i64 1, align 4
// LLVM: %[[A:.*]] = alloca i32, i64 1, align 4
// LLVM: store i32 1, ptr %[[A]], align 4
// LLVM: %[[A_LOAD:.*]] = load i32, ptr %[[A]], align 4
@@ -139,6 +144,7 @@ int inc1() {
// CHECK: %[[A_TO_OUTPUT:.*]] = cir.load %[[A]]
// LLVM: define i32 @inc1()
+// LLVM: %[[RV:.*]] = alloca i32, i64 1, align 4
// LLVM: %[[A:.*]] = alloca i32, i64 1, align 4
// LLVM: store i32 1, ptr %[[A]], align 4
// LLVM: %[[A_LOAD:.*]] = load i32, ptr %[[A]], align 4
@@ -166,6 +172,7 @@ int dec1() {
// CHECK: %[[A_TO_OUTPUT:.*]] = cir.load %[[A]]
// LLVM: define i32 @dec1()
+// LLVM: %[[RV:.*]] = alloca i32, i64 1, align 4
// LLVM: %[[A:.*]] = alloca i32, i64 1, align 4
// LLVM: store i32 1, ptr %[[A]], align 4
// LLVM: %[[A_LOAD:.*]] = load i32, ptr %[[A]], align 4
@@ -196,6 +203,7 @@ int inc2() {
// CHECK: %[[B_TO_OUTPUT:.*]] = cir.load %[[B]]
// LLVM: define i32 @inc2()
+// LLVM: %[[RV:.*]] = alloca i32, i64 1, align 4
// LLVM: %[[A:.*]] = alloca i32, i64 1, align 4
// LLVM: %[[B:.*]] = alloca i32, i64 1, align 4
// LLVM: store i32 1, ptr %[[A]], align 4
@@ -226,6 +234,7 @@ float fpPlus() {
// CHECK: %[[OUTPUT:.*]] = cir.unary(plus, %[[INPUT]])
// LLVM: define float @fpPlus()
+// LLVM: %[[RV:.*]] = alloca float, i64 1, align 4
// LLVM: %[[A:.*]] = alloca float, i64 1, align 4
// LLVM: store float 1.000000e+00, ptr %[[A]], align 4
// LLVM: %[[A_LOAD:.*]] = load float, ptr %[[A]], align 4
@@ -246,6 +255,7 @@ float fpMinus() {
// CHECK: %[[OUTPUT:.*]] = cir.unary(minus, %[[INPUT]])
// LLVM: define float @fpMinus()
+// LLVM: %[[RV:.*]] = alloca float, i64 1, align 4
// LLVM: %[[A:.*]] = alloca float, i64 1, align 4
// LLVM: store float 1.000000e+00, ptr %[[A]], align 4
// LLVM: %[[A_LOAD:.*]] = load float, ptr %[[A]], align 4
@@ -270,6 +280,7 @@ float fpPreInc() {
// CHECK: %[[INCREMENTED:.*]] = cir.unary(inc, %[[INPUT]])
// LLVM: define float @fpPreInc()
+// LLVM: %[[RV:.*]] = alloca float, i64 1, align 4
// LLVM: %[[A:.*]] = alloca float, i64 1, align 4
// LLVM: store float 1.000000e+00, ptr %[[A]], align 4
// LLVM: %[[A_LOAD:.*]] = load float, ptr %[[A]], align 4
@@ -294,6 +305,7 @@ float fpPreDec() {
// CHECK: %[[DECREMENTED:.*]] = cir.unary(dec, %[[INPUT]])
// LLVM: define float @fpPreDec()
+// LLVM: %[[RV:.*]] = alloca float, i64 1, align 4
// LLVM: %[[A:.*]] = alloca float, i64 1, align 4
// LLVM: store float 1.000000e+00, ptr %[[A]], align 4
// LLVM: %[[A_LOAD:.*]] = load float, ptr %[[A]], align 4
@@ -318,6 +330,7 @@ float fpPostInc() {
// CHECK: %[[INCREMENTED:.*]] = cir.unary(inc, %[[INPUT]])
// LLVM: define float @fpPostInc()
+// LLVM: %[[RV:.*]] = alloca float, i64 1, align 4
// LLVM: %[[A:.*]] = alloca float, i64 1, align 4
// LLVM: store float 1.000000e+00, ptr %[[A]], align 4
// LLVM: %[[A_LOAD:.*]] = load float, ptr %[[A]], align 4
@@ -342,6 +355,7 @@ float fpPostDec() {
// CHECK: %[[DECREMENTED:.*]] = cir.unary(dec, %[[INPUT]])
// LLVM: define float @fpPostDec()
+// LLVM: %[[RV:.*]] = alloca float, i64 1, align 4
// LLVM: %[[A:.*]] = alloca float, i64 1, align 4
// LLVM: store float 1.000000e+00, ptr %[[A]], align 4
// LLVM: %[[A_LOAD:.*]] = load float, ptr %[[A]], align 4
@@ -372,6 +386,7 @@ float fpPostInc2() {
// CHECK: %[[B_TO_OUTPUT:.*]] = cir.load %[[B]]
// LLVM: define float @fpPostInc2()
+// LLVM: %[[RV:.*]] = alloca float, i64 1, align 4
// LLVM: %[[A:.*]] = alloca float, i64 1, align 4
// LLVM: %[[B:.*]] = alloca float, i64 1, align 4
// LLVM: store float 1.000000e+00, ptr %[[A]], align 4
diff --git a/clang/test/CIR/Lowering/basic.cpp b/clang/test/CIR/Lowering/basic.cpp
index d1dc343a068a8..9627057a469a5 100644
--- a/clang/test/CIR/Lowering/basic.cpp
+++ b/clang/test/CIR/Lowering/basic.cpp
@@ -6,9 +6,12 @@ int f1() {
}
// CHECK: define{{.*}} i32 @f1() {
+// CHECK: %[[RV:.*]] = alloca i32, i64 1, align 4
// CHECK: %[[I_PTR:.*]] = alloca i32, i64 1, align 4
// CHECK: %[[I:.*]] = load i32, ptr %[[I_PTR]], align 4
-// CHECK: ret i32 %[[I]]
+// CHECK: store i32 %[[I]], ptr %[[RV]], align 4
+// CHECK: %[[R:.*]] = load i32, ptr %[[RV]], align 4
+// CHECK: ret i32 %[[R]]
int f2() {
const int i = 2;
@@ -16,10 +19,13 @@ int f2() {
}
// CHECK: define{{.*}} i32 @f2() {
+// CHECK: %[[RV:.*]] = alloca i32, i64 1, align 4
// CHECK: %[[I_PTR:.*]] = alloca i32, i64 1, align 4
// CHECK: store i32 2, ptr %[[I_PTR]], align 4
// CHECK: %[[I:.*]] = load i32, ptr %[[I_PTR]], align 4
-// CHECK: ret i32 %[[I]]
+// CHECK: store i32 %[[I]], ptr %[[RV]], align 4
+// CHECK: %[[R:.*]] = load i32, ptr %[[RV]], align 4
+// CHECK: ret i32 %[[R]]
int f3(int i) {
return i;
@@ -27,9 +33,12 @@ int f3(int i) {
// CHECK: define{{.*}} i32 @f3(i32 %[[ARG:.*]])
// CHECK: %[[ARG_ALLOCA:.*]] = alloca i32, i64 1, align 4
+// CHECK: %[[RV:.*]] = alloca i32, i64 1, align 4
// CHECK: store i32 %[[ARG]], ptr %[[ARG_ALLOCA]], align 4
// CHECK: %[[ARG_VAL:.*]] = load i32, ptr %[[ARG_ALLOCA]], align 4
-// CHECK: ret i32 %[[ARG_VAL]]
+// CHECK: store i32 %[[ARG_VAL]], ptr %[[RV]], align 4
+// CHECK: %[[R:.*]] = load i32, ptr %[[RV]], align 4
+// CHECK: ret i32 %[[R]]
int f4(const int i) {
return i;
@@ -37,6 +46,9 @@ int f4(const int i) {
// CHECK: define{{.*}} i32 @f4(i32 %[[ARG:.*]])
// CHECK: %[[ARG_ALLOCA:.*]] = alloca i32, i64 1, align 4
+// CHECK: %[[RV:.*]] = alloca i32, i64 1, align 4
// CHECK: store i32 %[[ARG]], ptr %[[ARG_ALLOCA]], align 4
// CHECK: %[[ARG_VAL:.*]] = load i32, ptr %[[ARG_ALLOCA]], align 4
-// CHECK: ret i32 %[[ARG_VAL]]
+// CHECK: store i32 %[[ARG_VAL]], ptr %[[RV]], align 4
+// CHECK: %[[R:.*]] = load i32, ptr %[[RV]], align 4
+// CHECK: ret i32 %[[R]]
diff --git a/clang/test/CIR/Lowering/func-simple.cpp b/clang/test/CIR/Lowering/func-simple.cpp
index 32d75cdd2c15d..f22562a66e066 100644
--- a/clang/test/CIR/Lowering/func-simple.cpp
+++ b/clang/test/CIR/Lowering/func-simple.cpp
@@ -11,7 +11,10 @@ void voidret() { return; }
int intfunc() { return 42; }
// CHECK: define{{.*}} i32 @intfunc()
-// CHECK: ret i32 42
+// CHECK: %[[RV:.*]] = alloca i32, i64 1, align 4
+// CHECK: store i32 42, ptr %[[RV]], align 4
+// CHECK: %[[R:.*]] = load i32, ptr %[[RV]], align 4
+// CHECK: ret i32 %[[R]]
int scopes() {
{
@@ -21,34 +24,52 @@ int scopes() {
}
}
// CHECK: define{{.*}} i32 @scopes() {
-// CHECK: br label %[[LABEL1:.*]]
-// CHECK: [[LABEL1]]:
-// CHECK: br label %[[LABEL2:.*]]
-// CHECK: [[LABEL2]]:
-// CHECK: ret i32 99
-// CHECK: [[LABEL3:.*]]:
-// CHECK: br label %[[LABEL4:.*]]
-// CHECK: [[LABEL4]]:
-// CHECK: call void @llvm.trap()
-// CHECK: unreachable
+// CHECK: %[[RV:.*]] = alloca i32, i64 1, align 4
+// CHECK: br label %[[LABEL1:.*]]
+// CHECK: [[LABEL1]]:
+// CHECK: br label %[[LABEL2:.*]]
+// CHECK: [[LABEL2]]:
+// CHECK: store i32 99, ptr %[[RV]], align 4
+// CHECK: br label %[[LABEL3:.*]]
+// CHECK: [[LABEL3]]:
+// CHECK: %[[R:.*]] = load i32, ptr %[[RV]], align 4
+// CHECK: ret i32 %[[R]]
+// CHECK: [[LABEL4:.*]]:
+// CHECK: br label %[[LABEL5:.*]]
+// CHECK: [[LABEL5]]:
+// CHECK: call void @llvm.trap()
+// CHECK: unreachable
// CHECK: }
long longfunc() { return 42l; }
// CHECK: define{{.*}} i64 @longfunc() {
-// CHECK: ret i64 42
+// CHECK: %[[RV:.*]] = alloca i64, i64 1, align 8
+// CHECK: store i64 42, ptr %[[RV]], align 8
+// CHECK: %[[R:.*]] = load i64, ptr %[[RV]], align 8
+// CHECK: ret i64 %[[R]]
// CHECK: }
unsigned unsignedfunc() { return 42u; }
// CHECK: define{{.*}} i32 @unsignedfunc() {
-// CHECK: ret i32 42
+// CHECK: %[[RV:.*]] = alloca i32, i64 1, align 4
+// CHECK: store i32 42, ptr %[[RV]], align 4
+// CHECK: %[[R:.*]] = load i32, ptr %[[RV]], align 4
+// CHECK: ret i32 %[[R]]
// CHECK: }
unsigned long long ullfunc() { return 42ull; }
// CHECK: define{{.*}} i64 @ullfunc() {
-// CHECK: ret i64 42
+// CHECK: %[[RV:.*]] = alloca i64, i64 1, align 8
+// CHECK: store i64 42, ptr %[[RV]], align 8
+// CHECK: %[[R:.*]] = load i64, ptr %[[RV]], align 8
+// CHECK: ret i64 %[[R]]
// CHECK: }
bool boolfunc() { return true; }
// CHECK: define{{.*}} i1 @boolfunc() {
-// CHECK: ret i1 true
+// CHECK: %[[RV:.*]] = alloca i8, i64 1, align 1
+// CHECK: store i8 1, ptr %[[RV]], align 1
+// CHECK: %[[R8:.*]] = load i8, ptr %[[RV]], align 1
+// CHECK: %[[R:.*]] = trunc i8 %[[R8]] to i1
+// CHECK: ret i1 %[[R]]
// CHECK: }
diff --git a/clang/test/CIR/func-simple.cpp b/clang/test/CIR/func-simple.cpp
index d37ccc7229f22..a0440a941d388 100644
--- a/clang/test/CIR/func-simple.cpp
+++ b/clang/test/CIR/func-simple.cpp
@@ -8,13 +8,20 @@ void empty() { }
void voidret() { return; }
// CHECK: cir.func @voidret() {
+// CHECK: cir.br ^bb1
+// CHECK: ^bb1:
// CHECK: cir.return
// CHECK: }
int intfunc() { return 42; }
// CHECK: cir.func @intfunc() -> !cir.int<s, 32> {
-// CHECK: %0 = cir.const #cir.int<42> : !cir.int<s, 32>
-// CHECK: cir.return %0 : !cir.int<s, 32>
+// CHECK: %0 = cir.alloca !cir.int<s, 32>, !cir.ptr<!cir.int<s, 32>>, ["__retval"] {alignment = 4 : i64}
+// CHECK: %1 = cir.const #cir.int<42> : !cir.int<s, 32>
+// CHECK: cir.store %1, %0 : !cir.int<s, 32>, !cir.ptr<!cir.int<s, 32>>
+// CHECK: cir.br ^bb1
+// CHECK: ^bb1:
+// CHECK: %2 = cir.load %0 : !cir.ptr<!cir.int<s, 32>>, !cir.int<s, 32>
+// CHECK: cir.return %2 : !cir.int<s, 32>
// CHECK: }
int scopes() {
@@ -25,10 +32,15 @@ int scopes() {
}
}
// CHECK: cir.func @scopes() -> !cir.int<s, 32> {
+// CHECK: %0 = cir.alloca !cir.int<s, 32>, !cir.ptr<!cir.int<s, 32>>, ["__retval"] {alignment = 4 : i64}
// CHECK: cir.scope {
// CHECK: cir.scope {
-// CHECK: %0 = cir.const #cir.int<99> : !cir.int<s, 32>
-// CHECK: cir.return %0 : !cir.int<s, 32>
+// CHECK: %1 = cir.const #cir.int<99> : !cir.int<s, 32>
+// CHECK: cir.store %1, %0 : !cir.int<s, 32>, !cir.ptr<!cir.int<s, 32>>
+// CHECK: cir.br ^bb1
+// CHECK: ^bb1:
+// CHECK: %2 = cir.load %0 : !cir.ptr<!cir.int<s, 32>>, !cir.int<s, 32>
+// CHECK: cir.return %2 : !cir.int<s, 32>
// CHECK: }
// CHECK: }
// CHECK: cir.trap
@@ -36,36 +48,66 @@ int scopes() {
long longfunc() { return 42l; }
// CHECK: cir.func @longfunc() -> !cir.int<s, 64>
-// CHECK: %0 = cir.const #cir.int<42> : !cir.int<s, 64>
-// CHECK: cir.return %0 : !cir.int<s, 64>
+// CHECK: %0 = cir.alloca !cir.int<s, 64>, !cir.ptr<!cir.int<s, 64>>, ["__retval"] {alignment = 8 : i64}
+// CHECK: %1 = cir.const #cir.int<42> : !cir.int<s, 64>
+// CHECK: cir.store %1, %0 : !cir.int<s, 64>, !cir.ptr<!cir.int<s, 64>>
+// CHECK: cir.br ^bb1
+// CHECK: ^bb1:
+// CHECK: %2 = cir.load %0 : !cir.ptr<!cir.int<s, 64>>, !cir.int<s, 64>
+// CHECK: cir.return %2 : !cir.int<s, 64>
// CHECK: }
unsigned unsignedfunc() { return 42u; }
// CHECK: cir.func @unsignedfunc() -> !cir.int<u, 32>
-// CHECK: %0 = cir.const #cir.int<42> : !cir.int<u, 32>
-// CHECK: cir.return %0 : !cir.int<u, 32>
+// CHECK: %0 = cir.alloca !cir.int<u, 32>, !cir.ptr<!cir.int<u, 32>>, ["__retval"] {alignment = 4 : i64}
+// CHECK: %1 = cir.const #cir.int<42> : !cir.int<u, 32>
+// CHECK: cir.store %1, %0 : !cir.int<u, 32>, !cir.ptr<!cir.int<u, 32>>
+// CHECK: cir.br ^bb1
+// CHECK: ^bb1:
+// CHECK: %2 = cir.load %0 : !cir.ptr<!cir.int<u, 32>>, !cir.int<u, 32>
+// CHECK: cir.return %2 : !cir.int<u, 32>
// CHECK: }
unsigned long long ullfunc() { return 42ull; }
// CHECK: cir.func @ullfunc() -> !cir.int<u, 64>
-// CHECK: %0 = cir.const #cir.int<42> : !cir.int<u, 64>
-// CHECK: cir.return %0 : !cir.int<u, 64>
+// CHECK: %0 = cir.alloca !cir.int<u, 64>, !cir.ptr<!cir.int<u, 64>>, ["__retval"] {alignment = 8 : i64}
+// CHECK: %1 = cir.const #cir.int<42> : !cir.int<u, 64>
+// CHECK: cir.store %1, %0 : !cir.int<u, 64>, !cir.ptr<!cir.int<u, 64>>
+// CHECK: cir.br ^bb1
+// CHECK: ^bb1:
+// CHECK: %2 = cir.load %0 : !cir.ptr<!cir.int<u, 64>>, !cir.int<u, 64>
+// CHECK: cir.return %2 : !cir.int<u, 64>
// CHECK: }
bool boolfunc() { return true; }
// CHECK: cir.func @boolfunc() -> !cir.bool {
-// CHECK: %0 = cir.const #true
-// CHECK: cir.return %0 : !cir.bool
+// CHECK: %0 = cir.alloca !cir.bool, !cir.ptr<!cir.bool>, ["__retval"] {alignment = 1 : i64}
+// CHECK: %1 = cir.const #true
+// CHECK: cir.store %1, %0 : !cir.bool, !cir.ptr<!cir.bool>
+// CHECK: cir.br ^bb1
+// CHECK: ^bb1:
+// CHECK: %2 = cir.load %0 : !cir.ptr<!cir.bool>, !cir.bool
+// CHECK: cir.return %2 : !cir.bool
// CHECK: }
float floatfunc() { return 42.42f; }
// CHECK: cir.func @floatfunc() -> !cir.float {
-// CHECK: %0 = cir.const #cir.fp<4.242
-// CHECK: cir.return %0 : !cir.float
+// CHECK: %0 = cir.alloca !cir.float, !cir.ptr<!cir.float>, ["__retval"] {alignment = 4 : i64}
+// CHECK: %1 = cir.const #cir.fp<4.242
+// CHECK: cir.store %1, %0 : !cir.float, !cir.ptr<!cir.float>
+// CHECK: cir.br ^bb1
+// CHECK: ^bb1:
+// CHECK: %2 = cir.load %0 : !cir.ptr<!cir.float>, !cir.float
+// CHECK: cir.return %2 : !cir.float
// CHECK: }
double doublefunc() { return 42.42; }
// CHECK: cir.func @doublefunc() -> !cir.double {
-// CHECK: %0 = cir.const #cir.fp<4.242
-// CHECK: cir.return %0 : !cir.double
+// CHECK: %0 = cir.alloca !cir.double, !cir.ptr<!cir.double>, ["__retval"] {alignment = 8 : i64}
+// CHECK: %1 = cir.const #cir.fp<4.242
+// CHECK: cir.store %1, %0 : !cir.double, !cir.ptr<!cir.double>
+// CHECK: cir.br ^bb1
+// CHECK: ^bb1:
+// CHECK: %2 = cir.load %0 : !cir.ptr<!cir.double>, !cir.double
+// CHECK: cir.return %2 : !cir.double
// CHECK: }
>From 172ea3a0ae17a182e1c2d1e999b152e0241218d4 Mon Sep 17 00:00:00 2001
From: David Olsen <dolsen at nvidia.com>
Date: Wed, 19 Mar 2025 08:49:32 -0700
Subject: [PATCH 2/2] Upstream LexicalScope: review feedback
Change `curr` to `cur` in names.
Fix typo in comment.
---
clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 20 ++++++++++----------
clang/lib/CIR/CodeGen/CIRGenFunction.h | 10 +++++-----
clang/lib/CIR/CodeGen/CIRGenStmt.cpp | 2 +-
3 files changed, 16 insertions(+), 16 deletions(-)
diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
index 5685339c9e637..16547f2401292 100644
--- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
@@ -158,7 +158,7 @@ void CIRGenFunction::declare(mlir::Value addrVal, const Decl *var, QualType ty,
void CIRGenFunction::LexicalScope::cleanup() {
CIRGenBuilderTy &builder = cgf.builder;
- LexicalScope *localScope = cgf.currLexScope;
+ LexicalScope *localScope = cgf.curLexScope;
if (returnBlock != nullptr) {
// Write out the return block, which loads the value from `__retval` and
@@ -168,16 +168,16 @@ void CIRGenFunction::LexicalScope::cleanup() {
(void)emitReturn(*returnLoc);
}
- mlir::Block *currBlock = builder.getBlock();
- if (isGlobalInit() && !currBlock)
+ mlir::Block *curBlock = builder.getBlock();
+ if (isGlobalInit() && !curBlock)
return;
- if (currBlock->mightHaveTerminator() && currBlock->getTerminator())
+ if (curBlock->mightHaveTerminator() && curBlock->getTerminator())
return;
// Get rid of any empty block at the end of the scope.
bool entryBlock = builder.getInsertionBlock()->isEntryBlock();
- if (!entryBlock && currBlock->empty()) {
- currBlock->erase();
+ if (!entryBlock && curBlock->empty()) {
+ curBlock->erase();
if (returnBlock != nullptr && returnBlock->getUses().empty())
returnBlock->erase();
return;
@@ -186,7 +186,7 @@ void CIRGenFunction::LexicalScope::cleanup() {
// Reached the end of the scope.
{
mlir::OpBuilder::InsertionGuard guard(builder);
- builder.setInsertionPointToEnd(currBlock);
+ builder.setInsertionPointToEnd(curBlock);
if (localScope->depth == 0) {
// Reached the end of the function.
@@ -203,7 +203,7 @@ void CIRGenFunction::LexicalScope::cleanup() {
}
// Reached the end of a non-function scope. Some scopes, such as those
// used with the ?: operator, can return a value.
- if (!localScope->isTernary() && !currBlock->mightHaveTerminator()) {
+ if (!localScope->isTernary() && !curBlock->mightHaveTerminator()) {
!retVal ? builder.create<cir::YieldOp>(localScope->endLoc)
: builder.create<cir::YieldOp>(localScope->endLoc, retVal);
}
@@ -223,7 +223,7 @@ cir::ReturnOp CIRGenFunction::LexicalScope::emitReturn(mlir::Location loc) {
return builder.create<cir::ReturnOp>(loc);
}
-// This is copyied from CodeGenModule::MayDropFunctionReturn. This is a
+// This is copied from CodeGenModule::MayDropFunctionReturn. This is a
// candidate for sharing between CIRGen and CodeGen.
static bool mayDropFunctionReturn(const ASTContext &astContext,
QualType returnType) {
@@ -239,7 +239,7 @@ static bool mayDropFunctionReturn(const ASTContext &astContext,
void CIRGenFunction::LexicalScope::emitImplicitReturn() {
CIRGenBuilderTy &builder = cgf.getBuilder();
- LexicalScope *localScope = cgf.currLexScope;
+ LexicalScope *localScope = cgf.curLexScope;
const auto *fd = cast<clang::FunctionDecl>(cgf.curGD.getDecl());
diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h
index b52f5ec734f70..ba05fb46a3c46 100644
--- a/clang/lib/CIR/CodeGen/CIRGenFunction.h
+++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h
@@ -378,11 +378,11 @@ class CIRGenFunction : public CIRGenTypeCache {
unsigned depth = 0;
LexicalScope(CIRGenFunction &cgf, mlir::Location loc, mlir::Block *eb)
- : cgf(cgf), entryBlock(eb), parentScope(cgf.currLexScope),
- beginLoc(loc), endLoc(loc) {
+ : cgf(cgf), entryBlock(eb), parentScope(cgf.curLexScope), beginLoc(loc),
+ endLoc(loc) {
assert(entryBlock && "LexicalScope requires an entry block");
- cgf.currLexScope = this;
+ cgf.curLexScope = this;
if (parentScope)
++depth;
@@ -396,7 +396,7 @@ class CIRGenFunction : public CIRGenTypeCache {
void setRetVal(mlir::Value v) { retVal = v; }
void cleanup();
- void restore() { cgf.currLexScope = parentScope; }
+ void restore() { cgf.curLexScope = parentScope; }
~LexicalScope() {
assert(!cir::MissingFeatures::generateDebugInfo());
@@ -465,7 +465,7 @@ class CIRGenFunction : public CIRGenTypeCache {
mlir::Block *getEntryBlock() { return entryBlock; }
};
- LexicalScope *currLexScope = nullptr;
+ LexicalScope *curLexScope = nullptr;
Address createTempAlloca(mlir::Type ty, CharUnits align, mlir::Location loc,
const Twine &name = "tmp");
diff --git a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp
index d4bc2db24d95c..bd1aa632da1d0 100644
--- a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp
@@ -274,7 +274,7 @@ mlir::LogicalResult CIRGenFunction::emitReturnStmt(const ReturnStmt &s) {
}
}
- auto *retBlock = currLexScope->getOrCreateRetBlock(*this, loc);
+ auto *retBlock = curLexScope->getOrCreateRetBlock(*this, loc);
builder.create<cir::BrOp>(loc, retBlock);
builder.createBlock(builder.getBlock()->getParent());
More information about the cfe-commits
mailing list