[clang] 9b9b9c6 - [CIR] Add support for lambda expressions (#157751)

via cfe-commits cfe-commits at lists.llvm.org
Mon Sep 22 08:29:12 PDT 2025


Author: Andy Kaylor
Date: 2025-09-22T08:29:08-07:00
New Revision: 9b9b9c631b5fe40996650f63dc8e0d253ff3a6b7

URL: https://github.com/llvm/llvm-project/commit/9b9b9c631b5fe40996650f63dc8e0d253ff3a6b7
DIFF: https://github.com/llvm/llvm-project/commit/9b9b9c631b5fe40996650f63dc8e0d253ff3a6b7.diff

LOG: [CIR] Add support for lambda expressions (#157751)

This adds support for lambda operators and lambda calls. This does not
include support for static lambda invoke, which will be added in a later
change.

Added: 
    clang/test/CIR/CodeGen/lambda.cpp

Modified: 
    clang/include/clang/CIR/Dialect/IR/CIROps.td
    clang/include/clang/CIR/MissingFeatures.h
    clang/lib/CIR/CodeGen/CIRGenClass.cpp
    clang/lib/CIR/CodeGen/CIRGenExpr.cpp
    clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp
    clang/lib/CIR/CodeGen/CIRGenFunction.cpp
    clang/lib/CIR/CodeGen/CIRGenFunction.h
    clang/lib/CIR/CodeGen/CIRGenModule.h
    clang/lib/CIR/Dialect/IR/CIRDialect.cpp

Removed: 
    


################################################################################
diff  --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td
index f1e24a5215dc8..bb394440bf8d8 100644
--- a/clang/include/clang/CIR/Dialect/IR/CIROps.td
+++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td
@@ -2326,6 +2326,10 @@ def CIR_FuncOp : CIR_Op<"func", [
     The function linkage information is specified by `linkage`, as defined by
     `GlobalLinkageKind` attribute.
 
+    The `lambda` keyword translates to a C++ `operator()` that implements a
+    lambda. This allows callsites to make certain assumptions about the real
+    function nature when writing analyses.
+
     The `no_proto` keyword is used to identify functions that were declared
     without a prototype and, consequently, may contain calls with invalid
     arguments and undefined behavior.
@@ -2348,6 +2352,7 @@ def CIR_FuncOp : CIR_Op<"func", [
   let arguments = (ins SymbolNameAttr:$sym_name,
                        CIR_VisibilityAttr:$global_visibility,
                        TypeAttrOf<CIR_FuncType>:$function_type,
+                       UnitAttr:$lambda,
                        UnitAttr:$no_proto,
                        UnitAttr:$dso_local,
                        DefaultValuedAttr<CIR_GlobalLinkageKind,

diff  --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h
index c2c6d18806308..9d2cf03b24c0c 100644
--- a/clang/include/clang/CIR/MissingFeatures.h
+++ b/clang/include/clang/CIR/MissingFeatures.h
@@ -188,6 +188,7 @@ struct MissingFeatures {
   static bool builtinCallF128() { return false; }
   static bool builtinCallMathErrno() { return false; }
   static bool builtinCheckKind() { return false; }
+  static bool cgCapturedStmtInfo() { return false; }
   static bool cgFPOptionsRAII() { return false; }
   static bool cirgenABIInfo() { return false; }
   static bool cleanupAfterErrorDiags() { return false; }
@@ -234,7 +235,6 @@ struct MissingFeatures {
   static bool isMemcpyEquivalentSpecialMember() { return false; }
   static bool isTrivialCtorOrDtor() { return false; }
   static bool lambdaCaptures() { return false; }
-  static bool lambdaFieldToName() { return false; }
   static bool loopInfoStack() { return false; }
   static bool lowerAggregateLoadStore() { return false; }
   static bool lowerModeOptLevel() { return false; }

diff  --git a/clang/lib/CIR/CodeGen/CIRGenClass.cpp b/clang/lib/CIR/CodeGen/CIRGenClass.cpp
index 1a557beb610ea..18e62f0213dd6 100644
--- a/clang/lib/CIR/CodeGen/CIRGenClass.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenClass.cpp
@@ -826,7 +826,7 @@ mlir::Value CIRGenFunction::getVTTParameter(GlobalDecl gd, bool forVirtualBase,
   if (!cgm.getCXXABI().needsVTTParameter(gd))
     return nullptr;
 
-  const CXXRecordDecl *rd = cast<CXXMethodDecl>(curFuncDecl)->getParent();
+  const CXXRecordDecl *rd = cast<CXXMethodDecl>(curCodeDecl)->getParent();
   const CXXRecordDecl *base = cast<CXXMethodDecl>(gd.getDecl())->getParent();
 
   uint64_t subVTTIndex;

diff  --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
index e9f5752e4b696..62769e952e45d 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
@@ -461,7 +461,8 @@ LValue CIRGenFunction::emitLValueForField(LValue base, const FieldDecl *field) {
 
   llvm::StringRef fieldName = field->getName();
   unsigned fieldIndex;
-  assert(!cir::MissingFeatures::lambdaFieldToName());
+  if (cgm.lambdaFieldToName.count(field))
+    fieldName = cgm.lambdaFieldToName[field];
 
   if (rec->isUnion())
     fieldIndex = field->getFieldIndex();
@@ -476,8 +477,16 @@ LValue CIRGenFunction::emitLValueForField(LValue base, const FieldDecl *field) {
 
   // If this is a reference field, load the reference right now.
   if (fieldType->isReferenceType()) {
-    cgm.errorNYI(field->getSourceRange(), "emitLValueForField: reference type");
-    return LValue();
+    assert(!cir::MissingFeatures::opTBAA());
+    LValue refLVal = makeAddrLValue(addr, fieldType, fieldBaseInfo);
+    if (recordCVR & Qualifiers::Volatile)
+      refLVal.getQuals().addVolatile();
+    addr = emitLoadOfReference(refLVal, getLoc(field->getSourceRange()),
+                               &fieldBaseInfo);
+
+    // Qualifiers on the struct don't apply to the referencee.
+    recordCVR = 0;
+    fieldType = fieldType->getPointeeType();
   }
 
   if (field->hasAttr<AnnotateAttr>()) {
@@ -619,6 +628,38 @@ static cir::FuncOp emitFunctionDeclPointer(CIRGenModule &cgm, GlobalDecl gd) {
   return cgm.getAddrOfFunction(gd);
 }
 
+static LValue emitCapturedFieldLValue(CIRGenFunction &cgf, const FieldDecl *fd,
+                                      mlir::Value thisValue) {
+  return cgf.emitLValueForLambdaField(fd, thisValue);
+}
+
+/// Given that we are currently emitting a lambda, emit an l-value for
+/// one of its members.
+///
+LValue CIRGenFunction::emitLValueForLambdaField(const FieldDecl *field,
+                                                mlir::Value thisValue) {
+  bool hasExplicitObjectParameter = false;
+  const auto *methD = dyn_cast_if_present<CXXMethodDecl>(curCodeDecl);
+  LValue lambdaLV;
+  if (methD) {
+    hasExplicitObjectParameter = methD->isExplicitObjectMemberFunction();
+    assert(methD->getParent()->isLambda());
+    assert(methD->getParent() == field->getParent());
+  }
+  if (hasExplicitObjectParameter) {
+    cgm.errorNYI(field->getSourceRange(), "ExplicitObjectMemberFunction");
+  } else {
+    QualType lambdaTagType =
+        getContext().getCanonicalTagType(field->getParent());
+    lambdaLV = makeNaturalAlignAddrLValue(thisValue, lambdaTagType);
+  }
+  return emitLValueForField(lambdaLV, field);
+}
+
+LValue CIRGenFunction::emitLValueForLambdaField(const FieldDecl *field) {
+  return emitLValueForLambdaField(field, cxxabiThisValue);
+}
+
 static LValue emitFunctionDeclLValue(CIRGenFunction &cgf, const Expr *e,
                                      GlobalDecl gd) {
   const FunctionDecl *fd = cast<FunctionDecl>(gd.getDecl());
@@ -645,6 +686,57 @@ static LValue emitFunctionDeclLValue(CIRGenFunction &cgf, const Expr *e,
                             AlignmentSource::Decl);
 }
 
+/// Determine whether we can emit a reference to \p vd from the current
+/// context, despite not necessarily having seen an odr-use of the variable in
+/// this context.
+/// TODO(cir): This could be shared with classic codegen.
+static bool canEmitSpuriousReferenceToVariable(CIRGenFunction &cgf,
+                                               const DeclRefExpr *e,
+                                               const VarDecl *vd) {
+  // For a variable declared in an enclosing scope, do not emit a spurious
+  // reference even if we have a capture, as that will emit an unwarranted
+  // reference to our capture state, and will likely generate worse code than
+  // emitting a local copy.
+  if (e->refersToEnclosingVariableOrCapture())
+    return false;
+
+  // For a local declaration declared in this function, we can always reference
+  // it even if we don't have an odr-use.
+  if (vd->hasLocalStorage()) {
+    return vd->getDeclContext() ==
+           dyn_cast_or_null<DeclContext>(cgf.curCodeDecl);
+  }
+
+  // For a global declaration, we can emit a reference to it if we know
+  // for sure that we are able to emit a definition of it.
+  vd = vd->getDefinition(cgf.getContext());
+  if (!vd)
+    return false;
+
+  // Don't emit a spurious reference if it might be to a variable that only
+  // exists on a different device / target.
+  // FIXME: This is unnecessarily broad. Check whether this would actually be a
+  // cross-target reference.
+  if (cgf.getLangOpts().OpenMP || cgf.getLangOpts().CUDA ||
+      cgf.getLangOpts().OpenCL) {
+    return false;
+  }
+
+  // We can emit a spurious reference only if the linkage implies that we'll
+  // be emitting a non-interposable symbol that will be retained until link
+  // time.
+  switch (cgf.cgm.getCIRLinkageVarDefinition(vd, /*IsConstant=*/false)) {
+  case cir::GlobalLinkageKind::ExternalLinkage:
+  case cir::GlobalLinkageKind::LinkOnceODRLinkage:
+  case cir::GlobalLinkageKind::WeakODRLinkage:
+  case cir::GlobalLinkageKind::InternalLinkage:
+  case cir::GlobalLinkageKind::PrivateLinkage:
+    return true;
+  default:
+    return false;
+  }
+}
+
 LValue CIRGenFunction::emitDeclRefLValue(const DeclRefExpr *e) {
   const NamedDecl *nd = e->getDecl();
   QualType ty = e->getType();
@@ -652,6 +744,32 @@ LValue CIRGenFunction::emitDeclRefLValue(const DeclRefExpr *e) {
   assert(e->isNonOdrUse() != NOUR_Unevaluated &&
          "should not emit an unevaluated operand");
 
+  if (const auto *vd = dyn_cast<VarDecl>(nd)) {
+    // Global Named registers access via intrinsics only
+    if (vd->getStorageClass() == SC_Register && vd->hasAttr<AsmLabelAttr>() &&
+        !vd->isLocalVarDecl()) {
+      cgm.errorNYI(e->getSourceRange(),
+                   "emitDeclRefLValue: Global Named registers access");
+      return LValue();
+    }
+
+    if (e->isNonOdrUse() == NOUR_Constant &&
+        (vd->getType()->isReferenceType() ||
+         !canEmitSpuriousReferenceToVariable(*this, e, vd))) {
+      cgm.errorNYI(e->getSourceRange(), "emitDeclRefLValue: NonOdrUse");
+      return LValue();
+    }
+
+    // Check for captured variables.
+    if (e->refersToEnclosingVariableOrCapture()) {
+      vd = vd->getCanonicalDecl();
+      if (FieldDecl *fd = lambdaCaptureFields.lookup(vd))
+        return emitCapturedFieldLValue(*this, fd, cxxabiThisValue);
+      assert(!cir::MissingFeatures::cgCapturedStmtInfo());
+      assert(!cir::MissingFeatures::openMP());
+    }
+  }
+
   if (const auto *vd = dyn_cast<VarDecl>(nd)) {
     // Checks for omitted feature handling
     assert(!cir::MissingFeatures::opAllocaStaticLocal());

diff  --git a/clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp b/clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp
index dc34d2b3baa8d..5615960ea5247 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp
@@ -99,6 +99,7 @@ class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
     assert(!cir::MissingFeatures::aggValueSlotDestructedFlag());
     Visit(e->getSubExpr());
   }
+  void VisitLambdaExpr(LambdaExpr *e);
 
   // Stubs -- These should be moved up when they are implemented.
   void VisitCastExpr(CastExpr *e) {
@@ -239,9 +240,6 @@ class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
     cgf.cgm.errorNYI(e->getSourceRange(),
                      "AggExprEmitter: VisitCXXInheritedCtorInitExpr");
   }
-  void VisitLambdaExpr(LambdaExpr *e) {
-    cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitLambdaExpr");
-  }
   void VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *e) {
     cgf.cgm.errorNYI(e->getSourceRange(),
                      "AggExprEmitter: VisitCXXStdInitializerListExpr");
@@ -495,8 +493,10 @@ void AggExprEmitter::emitInitializationToLValue(Expr *e, LValue lv) {
   if (isa<NoInitExpr>(e))
     return;
 
-  if (type->isReferenceType())
-    cgf.cgm.errorNYI("emitInitializationToLValue ReferenceType");
+  if (type->isReferenceType()) {
+    RValue rv = cgf.emitReferenceBindingToExpr(e);
+    return cgf.emitStoreThroughLValue(rv, lv);
+  }
 
   switch (cgf.getEvaluationKind(type)) {
   case cir::TEK_Complex:
@@ -550,6 +550,47 @@ void AggExprEmitter::emitNullInitializationToLValue(mlir::Location loc,
   cgf.emitNullInitialization(loc, lv.getAddress(), lv.getType());
 }
 
+void AggExprEmitter::VisitLambdaExpr(LambdaExpr *e) {
+  CIRGenFunction::SourceLocRAIIObject loc{cgf, cgf.getLoc(e->getSourceRange())};
+  AggValueSlot slot = ensureSlot(cgf.getLoc(e->getSourceRange()), e->getType());
+  [[maybe_unused]] LValue slotLV =
+      cgf.makeAddrLValue(slot.getAddress(), e->getType());
+
+  // We'll need to enter cleanup scopes in case any of the element
+  // initializers throws an exception or contains branch out of the expressions.
+  assert(!cir::MissingFeatures::opScopeCleanupRegion());
+
+  for (auto [curField, capture, captureInit] : llvm::zip(
+           e->getLambdaClass()->fields(), e->captures(), e->capture_inits())) {
+    // Pick a name for the field.
+    llvm::StringRef fieldName = curField->getName();
+    if (capture.capturesVariable()) {
+      assert(!curField->isBitField() && "lambdas don't have bitfield members!");
+      ValueDecl *v = capture.getCapturedVar();
+      fieldName = v->getName();
+      cgf.cgm.lambdaFieldToName[curField] = fieldName;
+    } else if (capture.capturesThis()) {
+      cgf.cgm.lambdaFieldToName[curField] = "this";
+    } else {
+      cgf.cgm.errorNYI(e->getSourceRange(), "Unhandled capture kind");
+      cgf.cgm.lambdaFieldToName[curField] = "unhandled-capture-kind";
+    }
+
+    // Emit initialization
+    LValue lv =
+        cgf.emitLValueForFieldInitialization(slotLV, curField, fieldName);
+    if (curField->hasCapturedVLAType())
+      cgf.cgm.errorNYI(e->getSourceRange(), "lambda captured VLA type");
+
+    emitInitializationToLValue(captureInit, lv);
+
+    // Push a destructor if necessary.
+    if ([[maybe_unused]] QualType::DestructionKind DtorKind =
+            curField->getType().isDestructedType())
+      cgf.cgm.errorNYI(e->getSourceRange(), "lambda with destructed field");
+  }
+}
+
 void AggExprEmitter::VisitCallExpr(const CallExpr *e) {
   if (e->getCallReturnType(cgf.getContext())->isReferenceType()) {
     cgf.cgm.errorNYI(e->getSourceRange(), "reference return type");

diff  --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
index e2181b8222aa2..f43a0e60c9f5b 100644
--- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
@@ -405,6 +405,7 @@ void CIRGenFunction::startFunction(GlobalDecl gd, QualType returnType,
   curFn = fn;
 
   const Decl *d = gd.getDecl();
+  curCodeDecl = d;
   const auto *fd = dyn_cast_or_null<FunctionDecl>(d);
   curFuncDecl = d->getNonClosureContext();
 
@@ -457,7 +458,36 @@ void CIRGenFunction::startFunction(GlobalDecl gd, QualType returnType,
 
     const auto *md = cast<CXXMethodDecl>(d);
     if (md->getParent()->isLambda() && md->getOverloadedOperator() == OO_Call) {
-      cgm.errorNYI(loc, "lambda call operator");
+      // We're in a lambda.
+      curFn.setLambda(true);
+
+      // Figure out the captures.
+      md->getParent()->getCaptureFields(lambdaCaptureFields,
+                                        lambdaThisCaptureField);
+      if (lambdaThisCaptureField) {
+        // If the lambda captures the object referred to by '*this' - either by
+        // value or by reference, make sure CXXThisValue points to the correct
+        // object.
+
+        // Get the lvalue for the field (which is a copy of the enclosing object
+        // or contains the address of the enclosing object).
+        LValue thisFieldLValue =
+            emitLValueForLambdaField(lambdaThisCaptureField);
+        if (!lambdaThisCaptureField->getType()->isPointerType()) {
+          // If the enclosing object was captured by value, just use its
+          // address. Sign this pointer.
+          cxxThisValue = thisFieldLValue.getPointer();
+        } else {
+          // Load the lvalue pointed to by the field, since '*this' was captured
+          // by reference.
+          cxxThisValue =
+              emitLoadOfLValue(thisFieldLValue, SourceLocation()).getValue();
+        }
+      }
+      for (auto *fd : md->getParent()->fields()) {
+        if (fd->hasCapturedVLAType())
+          cgm.errorNYI(loc, "lambda captured VLA type");
+      }
     } else {
       // Not in a lambda; just use 'this' from the method.
       // FIXME: Should we generate a new load for each use of 'this'? The fast

diff  --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h
index d107d481e3ce2..a0c571a544322 100644
--- a/clang/lib/CIR/CodeGen/CIRGenFunction.h
+++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h
@@ -73,6 +73,10 @@ class CIRGenFunction : public CIRGenTypeCache {
   /// Tracks function scope overall cleanup handling.
   EHScopeStack ehStack;
 
+  llvm::DenseMap<const clang::ValueDecl *, clang::FieldDecl *>
+      lambdaCaptureFields;
+  clang::FieldDecl *lambdaThisCaptureField = nullptr;
+
   /// CXXThisDecl - When generating code for a C++ member function,
   /// this will hold the implicit 'this' declaration.
   ImplicitParamDecl *cxxabiThisDecl = nullptr;
@@ -91,6 +95,8 @@ class CIRGenFunction : public CIRGenTypeCache {
 
   // Holds the Decl for the current outermost non-closure context
   const clang::Decl *curFuncDecl = nullptr;
+  /// This is the inner-most code context, which includes blocks.
+  const clang::Decl *curCodeDecl = nullptr;
 
   /// The function for which code is currently being generated.
   cir::FuncOp curFn;
@@ -1385,6 +1391,10 @@ class CIRGenFunction : public CIRGenTypeCache {
   LValue emitLValueForBitField(LValue base, const FieldDecl *field);
   LValue emitLValueForField(LValue base, const clang::FieldDecl *field);
 
+  LValue emitLValueForLambdaField(const FieldDecl *field);
+  LValue emitLValueForLambdaField(const FieldDecl *field,
+                                  mlir::Value thisValue);
+
   /// Like emitLValueForField, except that if the Field is a reference, this
   /// will return the address of the reference and not the address of the value
   /// stored in the reference.

diff  --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h
index 95a7ac0648bb7..073e8d96b773b 100644
--- a/clang/lib/CIR/CodeGen/CIRGenModule.h
+++ b/clang/lib/CIR/CodeGen/CIRGenModule.h
@@ -121,6 +121,12 @@ class CIRGenModule : public CIRGenTypeCache {
 
   mlir::Operation *lastGlobalOp = nullptr;
 
+  /// Keep a map between lambda fields and names, this needs to be per module
+  /// since lambdas might get generated later as part of defered work, and since
+  /// the pointers are supposed to be uniqued, should be fine. Revisit this if
+  /// it ends up taking too much memory.
+  llvm::DenseMap<const clang::FieldDecl *, llvm::StringRef> lambdaFieldToName;
+
   /// Tell the consumer that this variable has been instantiated.
   void handleCXXStaticMemberVarInstantiation(VarDecl *vd);
 

diff  --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp
index 53126348c3bdc..58ef500446aa7 100644
--- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp
+++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp
@@ -1546,11 +1546,14 @@ ParseResult cir::FuncOp::parse(OpAsmParser &parser, OperationState &state) {
   llvm::SMLoc loc = parser.getCurrentLocation();
   mlir::Builder &builder = parser.getBuilder();
 
+  mlir::StringAttr lambdaNameAttr = getLambdaAttrName(state.name);
   mlir::StringAttr noProtoNameAttr = getNoProtoAttrName(state.name);
   mlir::StringAttr visNameAttr = getSymVisibilityAttrName(state.name);
   mlir::StringAttr visibilityNameAttr = getGlobalVisibilityAttrName(state.name);
   mlir::StringAttr dsoLocalNameAttr = getDsoLocalAttrName(state.name);
 
+  if (::mlir::succeeded(parser.parseOptionalKeyword(lambdaNameAttr.strref())))
+    state.addAttribute(lambdaNameAttr, parser.getBuilder().getUnitAttr());
   if (parser.parseOptionalKeyword(noProtoNameAttr).succeeded())
     state.addAttribute(noProtoNameAttr, parser.getBuilder().getUnitAttr());
 
@@ -1658,6 +1661,9 @@ mlir::Region *cir::FuncOp::getCallableRegion() {
 }
 
 void cir::FuncOp::print(OpAsmPrinter &p) {
+  if (getLambda())
+    p << " lambda";
+
   if (getNoProto())
     p << " no_proto";
 

diff  --git a/clang/test/CIR/CodeGen/lambda.cpp b/clang/test/CIR/CodeGen/lambda.cpp
new file mode 100644
index 0000000000000..033adc60be1ed
--- /dev/null
+++ b/clang/test/CIR/CodeGen/lambda.cpp
@@ -0,0 +1,486 @@
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir
+// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t-cir.ll
+// RUN: FileCheck --check-prefix=LLVM --input-file=%t-cir.ll %s
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -emit-llvm %s -o %t.ll
+// RUN: FileCheck --check-prefix=OGCG --input-file=%t.ll %s
+
+// We declare anonymous record types to represent lambdas. Rather than trying
+// to match the declarations, we establish variables for these when they are used.
+
+void fn() {
+  auto a = [](){};
+  a();
+}
+
+// CIR: cir.func lambda internal private dso_local @_ZZ2fnvENK3$_0clEv(%[[THIS_ARG:.*]]: !cir.ptr<![[REC_LAM_FN_A:.*]]> {{.*}})
+// CIR:   %[[THIS:.*]] = cir.alloca !cir.ptr<![[REC_LAM_FN_A]]>, !cir.ptr<!cir.ptr<![[REC_LAM_FN_A]]>>, ["this", init]
+// CIR:   cir.store %[[THIS_ARG]], %[[THIS]]
+// CIR:   cir.load %[[THIS]]
+// CIR:   cir.return
+
+// CIR: cir.func dso_local @_Z2fnv()
+// CIR:   %[[A:.*]] = cir.alloca ![[REC_LAM_FN_A]], !cir.ptr<![[REC_LAM_FN_A]]>, ["a"]
+// CIR:   cir.call @_ZZ2fnvENK3$_0clEv(%[[A]])
+
+// LLVM: define internal void @"_ZZ2fnvENK3$_0clEv"(ptr %[[THIS_ARG:.*]])
+// LLVM:   %[[THIS_ADDR:.*]] = alloca ptr
+// LLVM:   store ptr %[[THIS_ARG]], ptr %[[THIS_ADDR]]
+// LLVM:   %[[THIS:.*]] = load ptr, ptr %[[THIS_ADDR]]
+// LLVM:   ret void
+
+// FIXME: parameter attributes should be emitted
+// LLVM: define {{.*}} void @_Z2fnv()
+// LLVM:   [[A:%.*]] = alloca %[[REC_LAM_FN_A:.*]], i64 1, align 1
+// LLVM:   call void @"_ZZ2fnvENK3$_0clEv"(ptr [[A]])
+// LLVM:   ret void
+
+// OGCG: define {{.*}} void @_Z2fnv()
+// OGCG:   %[[A:.*]] = alloca %[[REC_LAM_FN_A:.*]]
+// OGCG:   call void @"_ZZ2fnvENK3$_0clEv"(ptr {{.*}} %[[A]])
+// OGCG:   ret void
+
+// OGCG: define internal void @"_ZZ2fnvENK3$_0clEv"(ptr {{.*}} %[[THIS_ARG:.*]])
+// OGCG:   %[[THIS_ADDR:.*]] = alloca ptr
+// OGCG:   store ptr %[[THIS_ARG]], ptr %[[THIS_ADDR]]
+// OGCG:   %[[THIS:.*]] = load ptr, ptr %[[THIS_ADDR]]
+// OGCG:   ret void
+
+void l0() {
+  int i;
+  auto a = [&](){ i = i + 1; };
+  a();
+}
+
+// CIR: cir.func lambda internal private dso_local @_ZZ2l0vENK3$_0clEv(%[[THIS_ARG:.*]]: !cir.ptr<![[REC_LAM_L0_A:.*]]> {{.*}})
+// CIR:   %[[THIS_ADDR:.*]] = cir.alloca !cir.ptr<![[REC_LAM_L0_A]]>, !cir.ptr<!cir.ptr<![[REC_LAM_L0_A]]>>, ["this", init] {alignment = 8 : i64}
+// CIR:   cir.store %[[THIS_ARG]], %[[THIS_ADDR]]
+// CIR:   %[[THIS:.*]] = cir.load %[[THIS_ADDR]]
+// CIR:   %[[I_ADDR_ADDR:.*]] = cir.get_member %[[THIS]][0] {name = "i"}
+// CIR:   %[[I_ADDR:.*]] = cir.load %[[I_ADDR_ADDR]]
+// CIR:   %[[I:.*]] = cir.load align(4) %[[I_ADDR]]
+// CIR:   %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+// CIR:   %[[I_PLUS_ONE:.*]] = cir.binop(add, %[[I]], %[[ONE]]) nsw
+// CIR:   %[[I_ADDR_ADDR:.*]] = cir.get_member %[[THIS]][0] {name = "i"}
+// CIR:   %[[I_ADDR:.*]] = cir.load %[[I_ADDR_ADDR]]
+// CIR:   cir.store{{.*}} %[[I_PLUS_ONE]], %[[I_ADDR]]
+// CIR:   cir.return
+
+// CIR: cir.func {{.*}} @_Z2l0v()
+// CIR:   %[[I:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["i"]
+// CIR:   %[[A:.*]] = cir.alloca ![[REC_LAM_L0_A]], !cir.ptr<![[REC_LAM_L0_A]]>, ["a", init]
+// CIR:   %[[I_ADDR:.*]] = cir.get_member %[[A]][0] {name = "i"}
+// CIR:   cir.store{{.*}} %[[I]], %[[I_ADDR]]
+// CIR:   cir.call @_ZZ2l0vENK3$_0clEv(%[[A]])
+// CIR:   cir.return
+
+// LLVM: define internal void @"_ZZ2l0vENK3$_0clEv"(ptr %[[THIS_ARG:.*]])
+// LLVM:   %[[THIS_ADDR:.*]] = alloca ptr
+// LLVM:   store ptr %[[THIS_ARG]], ptr %[[THIS_ADDR]]
+// LLVM:   %[[THIS:.*]] = load ptr, ptr %[[THIS_ADDR]]
+// LLVM:   %[[I_ADDR_ADDR:.*]] = getelementptr %[[REC_LAM_L0_A:.*]], ptr %[[THIS]], i32 0, i32 0
+// LLVM:   %[[I_ADDR:.*]] = load ptr, ptr %[[I_ADDR_ADDR]]
+// LLVM:   %[[I:.*]] = load i32, ptr %[[I_ADDR]]
+// LLVM:   %[[ADD:.*]] = add nsw i32 %[[I]], 1
+// LLVM:   %[[I_ADDR_ADDR:.*]] = getelementptr %[[REC_LAM_L0_A]], ptr %[[THIS]], i32 0, i32 0
+// LLVM:   %[[I_ADDR:.*]] = load ptr, ptr %[[I_ADDR_ADDR]]
+// LLVM:   store i32 %[[ADD]], ptr %[[I_ADDR]]
+// LLVM:   ret void
+
+// LLVM: define {{.*}} void @_Z2l0v()
+// LLVM:   %[[I:.*]] = alloca i32
+// LLVM:   %[[A:.*]] = alloca %[[REC_LAM_L0_A]]
+// LLVM:   %[[I_ADDR:.*]] = getelementptr %[[REC_LAM_L0_A]], ptr %[[A]], i32 0, i32 0
+// LLVM:   store ptr %[[I]], ptr %[[I_ADDR]]
+// LLVM:   call void @"_ZZ2l0vENK3$_0clEv"(ptr %[[A]])
+// LLVM:   ret void
+
+// OGCG: define {{.*}} void @_Z2l0v()
+// OGCG:   %[[I:.*]] = alloca i32
+// OGCG:   %[[A:.*]] = alloca %[[REC_LAM_L0_A:.*]],
+// OGCG:   %[[I_ADDR:.*]] = getelementptr inbounds nuw %[[REC_LAM_L0_A]], ptr %[[A]], i32 0, i32 0
+// OGCG:   store ptr %[[I]], ptr %[[I_ADDR]]
+// OGCG:   call void @"_ZZ2l0vENK3$_0clEv"(ptr {{.*}} %[[A]])
+// OGCG:   ret void
+
+// OGCG: define internal void @"_ZZ2l0vENK3$_0clEv"(ptr {{.*}} %[[THIS_ARG:.*]])
+// OGCG:   %[[THIS_ADDR:.*]] = alloca ptr
+// OGCG:   store ptr %[[THIS_ARG]], ptr %[[THIS_ADDR]]
+// OGCG:   %[[THIS:.*]] = load ptr, ptr %[[THIS_ADDR]]
+// OGCG:   %[[I_ADDR_ADDR:.*]] = getelementptr inbounds nuw %[[REC_LAM_L0_A]], ptr %[[THIS]], i32 0, i32 0
+// OGCG:   %[[I_ADDR:.*]] = load ptr, ptr %[[I_ADDR_ADDR]]
+// OGCG:   %[[I:.*]] = load i32, ptr %[[I_ADDR]]
+// OGCG:   %[[ADD:.*]] = add nsw i32 %[[I]], 1
+// OGCG:   %[[I_ADDR_ADDR:.*]] = getelementptr inbounds nuw %[[REC_LAM_L0_A]], ptr %[[THIS]], i32 0, i32 0
+// OGCG:   %[[I_ADDR:.*]] = load ptr, ptr %[[I_ADDR_ADDR]]
+// OGCG:   store i32 %[[ADD]], ptr %[[I_ADDR]]
+// OGCG:   ret void
+
+auto g() {
+  int i = 12;
+  return [&] {
+    i += 100;
+    return i;
+  };
+}
+
+// CIR: cir.func dso_local @_Z1gv() -> ![[REC_LAM_G:.*]] {
+// CIR:   %[[RETVAL:.*]] = cir.alloca ![[REC_LAM_G]], !cir.ptr<![[REC_LAM_G]]>, ["__retval"]
+// CIR:   %[[I_ADDR:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["i", init]
+// CIR:   %[[TWELVE:.*]] = cir.const #cir.int<12> : !s32i
+// CIR:   cir.store{{.*}} %[[TWELVE]], %[[I_ADDR]] : !s32i, !cir.ptr<!s32i>
+// CIR:   %[[I_ADDR_ADDR:.*]] = cir.get_member %[[RETVAL]][0] {name = "i"} : !cir.ptr<![[REC_LAM_G]]> -> !cir.ptr<!cir.ptr<!s32i>>
+// CIR:   cir.store{{.*}} %[[I_ADDR]], %[[I_ADDR_ADDR]] : !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>
+// CIR:   %[[RET:.*]] = cir.load{{.*}} %[[RETVAL]] : !cir.ptr<![[REC_LAM_G]]>, ![[REC_LAM_G]]
+// CIR:   cir.return %[[RET]] : ![[REC_LAM_G]]
+
+// Note: In this case, OGCG returns a pointer to the 'i' field of the lambda,
+//       whereas CIR and LLVM return the lambda itself.
+
+// LLVM: define dso_local %[[REC_LAM_G:.*]] @_Z1gv()
+// LLVM:   %[[RETVAL:.*]] = alloca %[[REC_LAM_G]]
+// LLVM:   %[[I:.*]] = alloca i32
+// LLVM:   store i32 12, ptr %[[I]]
+// LLVM:   %[[I_ADDR:.*]] = getelementptr %[[REC_LAM_G]], ptr %[[RETVAL]], i32 0, i32 0
+// LLVM:   store ptr %[[I]], ptr %[[I_ADDR]]
+// LLVM:   %[[RET:.*]] = load %[[REC_LAM_G]], ptr %[[RETVAL]]
+// LLVM:   ret %[[REC_LAM_G]] %[[RET]]
+
+// OGCG: define dso_local ptr @_Z1gv()
+// OGCG:   %[[RETVAL:.*]] = alloca %[[REC_LAM_G:.*]],
+// OGCG:   %[[I:.*]] = alloca i32
+// OGCG:   store i32 12, ptr %[[I]]
+// OGCG:   %[[I_ADDR:.*]] = getelementptr inbounds nuw %[[REC_LAM_G]], ptr %[[RETVAL]], i32 0, i32 0
+// OGCG:   store ptr %[[I]], ptr %[[I_ADDR]]
+// OGCG:   %[[COERCE_DIVE:.*]] = getelementptr inbounds nuw %[[REC_LAM_G]], ptr %[[RETVAL]], i32 0, i32 0
+// OGCG:   %[[RET:.*]] = load ptr, ptr %[[COERCE_DIVE]]
+// OGCG:   ret ptr %[[RET]]
+
+auto g2() {
+  int i = 12;
+  auto lam = [&] {
+    i += 100;
+    return i;
+  };
+  return lam;
+}
+
+// Should be same as above because of NRVO
+// CIR: cir.func dso_local @_Z2g2v() -> ![[REC_LAM_G2:.*]] {
+// CIR:   %[[RETVAL:.*]] = cir.alloca ![[REC_LAM_G2]], !cir.ptr<![[REC_LAM_G2]]>, ["__retval", init]
+// CIR:   %[[I_ADDR:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["i", init]
+// CIR:   %[[TWELVE:.*]] = cir.const #cir.int<12> : !s32i
+// CIR:   cir.store{{.*}} %[[TWELVE]], %[[I_ADDR]] : !s32i, !cir.ptr<!s32i>
+// CIR:   %[[I_ADDR_ADDR:.*]] = cir.get_member %[[RETVAL]][0] {name = "i"} : !cir.ptr<![[REC_LAM_G2]]> -> !cir.ptr<!cir.ptr<!s32i>>
+// CIR:   cir.store{{.*}} %[[I_ADDR]], %[[I_ADDR_ADDR]] : !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>
+// CIR:   %[[RET:.*]] = cir.load{{.*}} %[[RETVAL]] : !cir.ptr<![[REC_LAM_G2]]>, ![[REC_LAM_G2]]
+// CIR:   cir.return %[[RET]] : ![[REC_LAM_G2]]
+
+// LLVM: define dso_local %[[REC_LAM_G:.*]] @_Z2g2v()
+// LLVM:   %[[RETVAL:.*]] = alloca %[[REC_LAM_G]]
+// LLVM:   %[[I:.*]] = alloca i32
+// LLVM:   store i32 12, ptr %[[I]]
+// LLVM:   %[[I_ADDR:.*]] = getelementptr %[[REC_LAM_G]], ptr %[[RETVAL]], i32 0, i32 0
+// LLVM:   store ptr %[[I]], ptr %[[I_ADDR]]
+// LLVM:   %[[RET:.*]] = load %[[REC_LAM_G]], ptr %[[RETVAL]]
+// LLVM:   ret %[[REC_LAM_G]] %[[RET]]
+
+// OGCG: define dso_local ptr @_Z2g2v()
+// OGCG:   %[[RETVAL:.*]] = alloca %[[REC_LAM_G2:.*]],
+// OGCG:   %[[I:.*]] = alloca i32
+// OGCG:   store i32 12, ptr %[[I]]
+// OGCG:   %[[I_ADDR:.*]] = getelementptr inbounds nuw %[[REC_LAM_G2]], ptr %[[RETVAL]], i32 0, i32 0
+// OGCG:   store ptr %[[I]], ptr %[[I_ADDR]]
+// OGCG:   %[[COERCE_DIVE:.*]] = getelementptr inbounds nuw %[[REC_LAM_G2]], ptr %[[RETVAL]], i32 0, i32 0
+// OGCG:   %[[RET:.*]] = load ptr, ptr %[[COERCE_DIVE]]
+// OGCG:   ret ptr %[[RET]]
+
+int f() {
+  return g2()();
+}
+
+// CIR:cir.func lambda internal private dso_local @_ZZ2g2vENK3$_0clEv(%[[THIS_ARG:.*]]: !cir.ptr<![[REC_LAM_G2]]> {{.*}}) -> !s32i
+// CIR:   %[[THIS_ADDR:.*]] = cir.alloca !cir.ptr<![[REC_LAM_G2]]>, !cir.ptr<!cir.ptr<![[REC_LAM_G2]]>>, ["this", init]
+// CIR:   %[[RETVAL:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["__retval"]
+// CIR:   cir.store %[[THIS_ARG]], %[[THIS_ADDR]]
+// CIR:   %[[THIS:.*]] = cir.load %[[THIS_ADDR]]
+// CIR:   %[[ONE_HUNDRED:.*]] = cir.const #cir.int<100> : !s32i
+// CIR:   %[[I_ADDR_ADDR:.*]] = cir.get_member %[[THIS]][0] {name = "i"}
+// CIR:   %[[I_ADDR:.*]] = cir.load %[[I_ADDR_ADDR]]
+// CIR:   %[[I:.*]] = cir.load{{.*}} %[[I_ADDR]]
+// CIR:   %[[I_PLUS_ONE_HUNDRED:.*]] = cir.binop(add, %[[I]], %[[ONE_HUNDRED]]) nsw : !s32i
+// CIR:   cir.store{{.*}} %[[I_PLUS_ONE_HUNDRED]], %[[I_ADDR]] : !s32i, !cir.ptr<!s32i>
+// CIR:   %[[I_ADDR_ADDR:.*]] = cir.get_member %[[THIS]][0] {name = "i"}
+// CIR:   %[[I_ADDR:.*]] = cir.load %[[I_ADDR_ADDR]]
+// CIR:   %[[I:.*]] = cir.load{{.*}} %[[I_ADDR]]
+// CIR:   cir.store{{.*}} %[[I]], %[[RETVAL]]
+// CIR:   %[[RET:.*]] = cir.load %[[RETVAL]]
+// CIR:   cir.return %[[RET]]
+
+// CIR: cir.func dso_local @_Z1fv() -> !s32i
+// CIR:   %[[RETVAL:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["__retval"]
+// CIR:   %[[SCOPE_RET:.*]] = cir.scope {
+// CIR:     %[[TMP:.*]] = cir.alloca ![[REC_LAM_G2]], !cir.ptr<![[REC_LAM_G2]]>, ["ref.tmp0"]
+// CIR:     %[[G2:.*]] = cir.call @_Z2g2v() : () -> ![[REC_LAM_G2]]
+// CIR:     cir.store{{.*}} %[[G2]], %[[TMP]]
+// CIR:     %[[RESULT:.*]] = cir.call @_ZZ2g2vENK3$_0clEv(%[[TMP]])
+// CIR:     cir.yield %[[RESULT]]
+// CIR:   }
+// CIR:   cir.store{{.*}} %[[SCOPE_RET]], %[[RETVAL]]
+// CIR:   %[[RET:.*]] = cir.load{{.*}} %[[RETVAL]]
+// CIR:   cir.return %[[RET]]
+
+// LLVM: define internal i32 @"_ZZ2g2vENK3$_0clEv"(ptr %[[THIS_ARG:.*]])
+// LLVM:   %[[THIS_ALLOCA:.*]] = alloca ptr
+// LLVM:   %[[I_ALLOCA:.*]] = alloca i32
+// LLVM:   store ptr %[[THIS_ARG]], ptr %[[THIS_ALLOCA]]
+// LLVM:   %[[THIS:.*]] = load ptr, ptr %[[THIS_ALLOCA]]
+// LLVM:   %[[I_ADDR_ADDR:.*]] = getelementptr %[[REC_LAM_G2:.*]], ptr %[[THIS]], i32 0, i32 0
+// LLVM:   %[[I_ADDR:.*]] = load ptr, ptr %[[I_ADDR_ADDR]]
+// LLVM:   %[[I:.*]] = load i32, ptr %[[I_ADDR]]
+// LLVM:   %[[ADD:.*]] = add nsw i32 %[[I]], 100
+// LLVM:   store i32 %[[ADD]], ptr %[[I_ADDR]]
+// LLVM:   %[[I_ADDR_ADDR:.*]] = getelementptr %[[REC_LAM_G2]], ptr %[[THIS]], i32 0, i32 0
+// LLVM:   %[[I_ADDR:.*]] = load ptr, ptr %[[I_ADDR_ADDR]]
+// LLVM:   %[[I:.*]] = load i32, ptr %[[I_ADDR]]
+// LLVM:   store i32 %[[I]], ptr %[[I_ALLOCA]]
+// LLVM:   %[[RET:.*]] = load i32, ptr %[[I_ALLOCA]]
+// LLVM:   ret i32 %[[RET]]
+
+// LLVM: define {{.*}} i32 @_Z1fv()
+// LLVM:   %[[TMP:.*]] = alloca %[[REC_LAM_G2]]
+// LLVM:   %[[RETVAL:.*]] = alloca i32
+// LLVM:   br label %[[SCOPE_BB:.*]]
+// LLVM: [[SCOPE_BB]]:
+// LLVM:   %[[G2:.*]] = call %[[REC_LAM_G2]] @_Z2g2v()
+// LLVM:   store %[[REC_LAM_G2]] %[[G2]], ptr %[[TMP]]
+// LLVM:   %[[RESULT:.*]] = call i32 @"_ZZ2g2vENK3$_0clEv"(ptr %[[TMP]])
+// LLVM:   br label %[[RET_BB:.*]]
+// LLVM: [[RET_BB]]:
+// LLVM:   %[[RETPHI:.*]] = phi i32 [ %[[RESULT]], %[[SCOPE_BB]] ]
+// LLVM:   store i32 %[[RETPHI]], ptr %[[RETVAL]]
+// LLVM:   %[[RET:.*]] = load i32, ptr %[[RETVAL]]
+// LLVM:   ret i32 %[[RET]]
+
+// The order of these functions is reversed in OGCG.
+
+// OGCG: define {{.*}} i32 @_Z1fv()
+// OGCG:   %[[TMP:.*]] = alloca %[[REC_LAM_G2]]
+// OGCG:   %[[RESULT:.*]] = call ptr @_Z2g2v()
+// OGCG:   %[[COERCE_DIVE:.*]] = getelementptr inbounds nuw %[[REC_LAM_G2]], ptr %[[TMP]], i32 0, i32 0
+// OGCG:   store ptr %[[RESULT]], ptr %[[COERCE_DIVE]]
+// OGCG:   %[[RET:.*]] = call {{.*}} i32 @"_ZZ2g2vENK3$_0clEv"(ptr {{.*}} %[[TMP]])
+// OGCG:   ret i32 %[[RET]]
+
+// OGCG: define internal noundef i32 @"_ZZ2g2vENK3$_0clEv"(ptr {{.*}} %[[THIS_ARG:.*]])
+// OGCG:   %[[THIS_ALLOCA:.*]] = alloca ptr
+// OGCG:   store ptr %[[THIS_ARG]], ptr %[[THIS_ALLOCA]]
+// OGCG:   %[[THIS:.*]] = load ptr, ptr %[[THIS_ALLOCA]]
+// OGCG:   %[[I_ADDR_ADDR:.*]] = getelementptr inbounds nuw %[[REC_LAM_G2]], ptr %[[THIS]], i32 0, i32 0
+// OGCG:   %[[I_ADDR:.*]] = load ptr, ptr %[[I_ADDR_ADDR]]
+// OGCG:   %[[I:.*]] = load i32, ptr %[[I_ADDR]]
+// OGCG:   %[[ADD:.*]] = add nsw i32 %[[I]], 100
+// OGCG:   store i32 %[[ADD]], ptr %[[I_ADDR]]
+// OGCG:   %[[I_ADDR_ADDR:.*]] = getelementptr inbounds nuw %[[REC_LAM_G2]], ptr %[[THIS]], i32 0, i32 0
+// OGCG:   %[[I_ADDR:.*]] = load ptr, ptr %[[I_ADDR_ADDR]]
+// OGCG:   %[[I:.*]] = load i32, ptr %[[I_ADDR]]
+// OGCG:   ret i32 %[[I]]
+
+struct A {
+  int a = 111;
+  // [*this] (C++17): the closure record holds a copy of the whole A object
+  // (see the copy-ctor call @_ZN1AC1ERKS_ in the checks below).
+  int foo() { return [*this] { return a; }(); }
+  // [this]: the closure record holds only a pointer to the enclosing A
+  // (the checks below store %[[THIS]] into the closure member).
+  int bar() { return [this] { return a; }(); }
+};
+
+// This function gets emitted before the lambdas in OGCG.
+
+// OGCG: define {{.*}} i32 @_Z17test_lambda_this1v
+// OGCG:   %[[A_THIS:.*]] = alloca %struct.A
+// OGCG:   call void @_ZN1AC1Ev(ptr {{.*}} %[[A_THIS]])
+// OGCG:   call noundef i32 @_ZN1A3fooEv(ptr {{.*}} %[[A_THIS]])
+// OGCG:   call noundef i32 @_ZN1A3barEv(ptr {{.*}} %[[A_THIS]])
+
+// lambda operator() in foo()
+// CIR: cir.func lambda comdat linkonce_odr @_ZZN1A3fooEvENKUlvE_clEv(%[[THIS_ARG:.*]]: !cir.ptr<![[REC_LAM_A:.*]]> {{.*}})
+// CIR:   %[[THIS_ADDR:.*]] = cir.alloca !cir.ptr<![[REC_LAM_A]]>, !cir.ptr<!cir.ptr<![[REC_LAM_A]]>>, ["this", init]
+// CIR:   %[[RETVAL:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["__retval"]
+// CIR:   cir.store{{.*}} %[[THIS_ARG]], %[[THIS_ADDR]]
+// CIR:   %[[THIS:.*]] = cir.load{{.*}} %[[THIS_ADDR]]
+// CIR:   %[[STRUCT_A:.*]] = cir.get_member %[[THIS]][0] {name = "this"}
+// CIR:   %[[A_A_ADDR:.*]] = cir.get_member %[[STRUCT_A]][0] {name = "a"}
+// CIR:   %[[A_A:.*]] = cir.load{{.*}} %[[A_A_ADDR]]
+// CIR:   cir.store{{.*}} %[[A_A]], %[[RETVAL]]
+// CIR:   %[[RET:.*]] = cir.load{{.*}} %[[RETVAL]]
+// CIR:   cir.return %[[RET]]
+
+// LLVM: define linkonce_odr i32 @_ZZN1A3fooEvENKUlvE_clEv(ptr %[[THIS_ARG:.*]])
+// LLVM:   %[[THIS_ALLOCA:.*]]  = alloca ptr
+// LLVM:   %[[RETVAL:.*]] = alloca i32
+// LLVM:   store ptr %[[THIS_ARG]], ptr %[[THIS_ALLOCA]]
+// LLVM:   %[[THIS:.*]] = load ptr, ptr %[[THIS_ALLOCA]]
+// LLVM:   %[[PTR_A:.*]] = getelementptr %[[REC_LAM_A:.*]], ptr %[[THIS]], i32 0, i32 0
+// LLVM:   %[[A_A_ADDR:.*]] = getelementptr %struct.A, ptr %[[PTR_A]], i32 0, i32 0
+// LLVM:   %[[A_A:.*]] = load i32, ptr %[[A_A_ADDR]]
+// LLVM:   store i32 %[[A_A]], ptr %[[RETVAL]]
+// LLVM:   %[[RET:.*]] = load i32, ptr %[[RETVAL]]
+// LLVM:   ret i32 %[[RET]]
+
+// The function above is defined after _ZN1A3barEv in OGCG, see below.
+
+// A::foo()
+// CIR: cir.func {{.*}} @_ZN1A3fooEv(%[[THIS_ARG:.*]]: !cir.ptr<!rec_A> {{.*}}) -> !s32i
+// CIR:   %[[THIS_ADDR:.*]] = cir.alloca !cir.ptr<!rec_A>, !cir.ptr<!cir.ptr<!rec_A>>, ["this", init]
+// CIR:   %[[RETVAL:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["__retval"]
+// CIR:   cir.store %[[THIS_ARG]], %[[THIS_ADDR]]
+// CIR:   %[[THIS:.*]] = cir.load deref %[[THIS_ADDR]] : !cir.ptr<!cir.ptr<!rec_A>>, !cir.ptr<!rec_A>
+// CIR:   %[[SCOPE_RET:.*]] = cir.scope {
+// CIR:     %[[LAM_ADDR:.*]] = cir.alloca ![[REC_LAM_A]], !cir.ptr<![[REC_LAM_A]]>, ["ref.tmp0"]
+// CIR:     %[[STRUCT_A:.*]] = cir.get_member %[[LAM_ADDR]][0] {name = "this"} : !cir.ptr<![[REC_LAM_A]]> -> !cir.ptr<!rec_A>
+// CIR:     cir.call @_ZN1AC1ERKS_(%[[STRUCT_A]], %[[THIS]]){{.*}} : (!cir.ptr<!rec_A>, !cir.ptr<!rec_A>){{.*}} -> ()
+// CIR:     %[[LAM_RET:.*]] = cir.call @_ZZN1A3fooEvENKUlvE_clEv(%[[LAM_ADDR]])
+// CIR:     cir.yield %[[LAM_RET]]
+// CIR:   }
+// CIR:   cir.store{{.*}} %[[SCOPE_RET]], %[[RETVAL]]
+// CIR:   %[[RET:.*]] = cir.load{{.*}} %[[RETVAL]]
+// CIR:   cir.return %[[RET]]
+
+// LLVM: define linkonce_odr i32 @_ZN1A3fooEv(ptr %[[THIS_ARG:.*]])
+// LLVM:   %[[LAM_ALLOCA:.*]] =  alloca %[[REC_LAM_A]]
+// LLVM:   %[[THIS_ALLOCA:.*]] = alloca ptr
+// LLVM:   %[[RETVAL:.*]] = alloca i32
+// LLVM:   store ptr %[[THIS_ARG]], ptr %[[THIS_ALLOCA]]
+// LLVM:   %[[THIS:.*]] = load ptr, ptr %[[THIS_ALLOCA]]
+// LLVM:   br label %[[SCOPE_BB:.*]]
+// LLVM: [[SCOPE_BB]]:
+// LLVM:   %[[STRUCT_A:.*]] = getelementptr %[[REC_LAM_A]], ptr %[[LAM_ALLOCA]], i32 0, i32 0
+// LLVM:   call void @_ZN1AC1ERKS_(ptr %[[STRUCT_A]], ptr %[[THIS]])
+// LLVM:   %[[LAM_RET:.*]] = call i32 @_ZZN1A3fooEvENKUlvE_clEv(ptr %[[LAM_ALLOCA]])
+// LLVM:   br label %[[RET_BB:.*]]
+// LLVM: [[RET_BB]]:
+// LLVM:   %[[RETPHI:.*]] = phi i32 [ %[[LAM_RET]], %[[SCOPE_BB]] ]
+// LLVM:   store i32 %[[RETPHI]], ptr %[[RETVAL]]
+// LLVM:   %[[RET:.*]] = load i32, ptr %[[RETVAL]]
+// LLVM:   ret i32 %[[RET]]
+
+// OGCG: define linkonce_odr noundef i32 @_ZN1A3fooEv(ptr {{.*}} %[[THIS_ARG:.*]])
+// OGCG:   %[[THIS_ALLOCA:.*]] = alloca ptr
+// OGCG:   %[[LAM_ALLOCA:.*]] =  alloca %[[REC_LAM_A:.*]],
+// OGCG:   store ptr %[[THIS_ARG]], ptr %[[THIS_ALLOCA]]
+// OGCG:   %[[THIS:.*]] = load ptr, ptr %[[THIS_ALLOCA]]
+// OGCG:   %[[STRUCT_A:.*]] = getelementptr inbounds nuw %[[REC_LAM_A]], ptr %[[LAM_ALLOCA]], i32 0, i32 0
+// OGCG:   call void @llvm.memcpy.p0.p0.i64(ptr {{.*}} %[[STRUCT_A]], ptr {{.*}} %[[THIS]], i64 4, i1 false)
+// OGCG:   %[[LAM_RET:.*]] = call noundef i32 @_ZZN1A3fooEvENKUlvE_clEv(ptr {{.*}} %[[LAM_ALLOCA]])
+// OGCG:   ret i32 %[[LAM_RET]]
+
+// lambda operator() in bar()
+// CIR: cir.func {{.*}} @_ZZN1A3barEvENKUlvE_clEv(%[[THIS_ARG:.*]]: !cir.ptr<![[REC_LAM_PTR_A:.*]]> {{.*}}) -> !s32i
+// CIR:   %[[THIS_ADDR:.*]] = cir.alloca !cir.ptr<![[REC_LAM_PTR_A]]>, !cir.ptr<!cir.ptr<![[REC_LAM_PTR_A]]>>, ["this", init]
+// CIR:   %[[RETVAL:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["__retval"]
+// CIR:   cir.store{{.*}} %[[THIS_ARG]], %[[THIS_ADDR]]
+// CIR:   %[[THIS:.*]] = cir.load{{.*}} %[[THIS_ADDR]]
+// CIR:   %[[STRUCT_A_ADDR_ADDR:.*]] = cir.get_member %[[THIS]][0] {name = "this"}
+// CIR:   %[[STRUCT_A_ADDR:.*]] = cir.load{{.*}} %[[STRUCT_A_ADDR_ADDR]]
+// CIR:   %[[A_A_ADDR:.*]] = cir.get_member %[[STRUCT_A_ADDR]][0] {name = "a"}
+// CIR:   %[[A_A:.*]] = cir.load{{.*}} %[[A_A_ADDR]]
+// CIR:   cir.store{{.*}} %[[A_A]], %[[RETVAL]]
+// CIR:   %[[RET:.*]] = cir.load{{.*}} %[[RETVAL]]
+// CIR:   cir.return %[[RET]]
+
+// LLVM: define linkonce_odr i32 @_ZZN1A3barEvENKUlvE_clEv(ptr %[[THIS_ARG:.*]])
+// LLVM:   %[[THIS_ALLOCA:.*]]  = alloca ptr
+// LLVM:   %[[RETVAL:.*]] = alloca i32
+// LLVM:   store ptr %[[THIS_ARG]], ptr %[[THIS_ALLOCA]]
+// LLVM:   %[[THIS:.*]] = load ptr, ptr %[[THIS_ALLOCA]]
+// LLVM:   %[[STRUCT_A_ADDRR_ADDR:.*]] = getelementptr %[[REC_LAM_PTR_A:.*]], ptr %[[THIS]], i32 0, i32 0
+// LLVM:   %[[STRUCT_A_ADDR:.*]] = load ptr, ptr %[[STRUCT_A_ADDRR_ADDR]]
+// LLVM:   %[[A_A_ADDR:.*]] = getelementptr %struct.A, ptr %[[STRUCT_A_ADDR]], i32 0, i32 0
+// LLVM:   %[[A_A:.*]] = load i32, ptr %[[A_A_ADDR]]
+// LLVM:   store i32 %[[A_A]], ptr %[[RETVAL]]
+// LLVM:   %[[RET:.*]] = load i32, ptr %[[RETVAL]]
+// LLVM:   ret i32 %[[RET]]
+
+// The function above is defined after _ZZN1A3fooEvENKUlvE_clEv in OGCG, see below.
+
+// A::bar()
+// CIR: cir.func {{.*}} @_ZN1A3barEv(%[[THIS_ARG:.*]]: !cir.ptr<!rec_A> {{.*}}) -> !s32i
+// CIR:   %[[THIS_ADDR:.*]] = cir.alloca !cir.ptr<!rec_A>, !cir.ptr<!cir.ptr<!rec_A>>, ["this", init]
+// CIR:   %[[RETVAL:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["__retval"]
+// CIR:   cir.store %[[THIS_ARG]], %[[THIS_ADDR]]
+// CIR:   %[[THIS:.*]] = cir.load %[[THIS_ADDR]] : !cir.ptr<!cir.ptr<!rec_A>>, !cir.ptr<!rec_A>
+// CIR:   %[[SCOPE_RET:.*]] = cir.scope {
+// CIR:     %[[LAM_ADDR:.*]] = cir.alloca ![[REC_LAM_PTR_A]], !cir.ptr<![[REC_LAM_PTR_A]]>, ["ref.tmp0"]
+// CIR:     %[[A_ADDR_ADDR:.*]] = cir.get_member %[[LAM_ADDR]][0] {name = "this"} : !cir.ptr<![[REC_LAM_PTR_A]]> -> !cir.ptr<!cir.ptr<!rec_A>>
+// CIR:     cir.store{{.*}} %[[THIS]], %[[A_ADDR_ADDR]]
+// CIR:     %[[LAM_RET:.*]] = cir.call @_ZZN1A3barEvENKUlvE_clEv(%[[LAM_ADDR]])
+// CIR:     cir.yield %[[LAM_RET]]
+// CIR:   }
+// CIR:   cir.store{{.*}} %[[SCOPE_RET]], %[[RETVAL]]
+// CIR:   %[[RET:.*]] = cir.load{{.*}} %[[RETVAL]]
+// CIR:   cir.return %[[RET]]
+
+// LLVM: define linkonce_odr i32 @_ZN1A3barEv(ptr %[[THIS_ARG:.*]])
+// LLVM:   %[[LAM_ALLOCA:.*]] =  alloca %[[REC_LAM_PTR_A]]
+// LLVM:   %[[THIS_ALLOCA:.*]] = alloca ptr
+// LLVM:   %[[RETVAL:.*]] = alloca i32
+// LLVM:   store ptr %[[THIS_ARG]], ptr %[[THIS_ALLOCA]]
+// LLVM:   %[[THIS:.*]] = load ptr, ptr %[[THIS_ALLOCA]]
+// LLVM:   br label %[[SCOPE_BB:.*]]
+// LLVM: [[SCOPE_BB]]:
+// LLVM:   %[[A_ADDR_ADDR:.*]] = getelementptr %[[REC_LAM_PTR_A]], ptr %[[LAM_ALLOCA]], i32 0, i32 0
+// LLVM:   store ptr %[[THIS]], ptr %[[A_ADDR_ADDR]]
+// LLVM:   %[[LAM_RET:.*]] = call i32 @_ZZN1A3barEvENKUlvE_clEv(ptr %[[LAM_ALLOCA]])
+// LLVM:   br label %[[RET_BB:.*]]
+// LLVM: [[RET_BB]]:
+// LLVM:   %[[RETPHI:.*]] = phi i32 [ %[[LAM_RET]], %[[SCOPE_BB]] ]
+// LLVM:   store i32 %[[RETPHI]], ptr %[[RETVAL]]
+// LLVM:   %[[RET:.*]] = load i32, ptr %[[RETVAL]]
+// LLVM:   ret i32 %[[RET]]
+
+// OGCG: define linkonce_odr noundef i32 @_ZN1A3barEv(ptr {{.*}} %[[THIS_ARG:.*]])
+// OGCG:   %[[THIS_ALLOCA:.*]] = alloca ptr
+// OGCG:   %[[LAM_ALLOCA:.*]] =  alloca %[[REC_LAM_PTR_A:.*]],
+// OGCG:   store ptr %[[THIS_ARG]], ptr %[[THIS_ALLOCA]]
+// OGCG:   %[[THIS:.*]] = load ptr, ptr %[[THIS_ALLOCA]]
+// OGCG:   %[[STRUCT_A:.*]] = getelementptr inbounds nuw %[[REC_LAM_PTR_A]], ptr %[[LAM_ALLOCA]], i32 0, i32 0
+// OGCG:   store ptr %[[THIS]], ptr %[[STRUCT_A]]
+// OGCG:   %[[LAM_RET:.*]] = call noundef i32 @_ZZN1A3barEvENKUlvE_clEv(ptr {{.*}} %[[LAM_ALLOCA]])
+// OGCG:   ret i32 %[[LAM_RET]]
+
+// OGCG: define linkonce_odr noundef i32 @_ZZN1A3fooEvENKUlvE_clEv(ptr {{.*}} %[[THIS_ARG:.*]])
+// OGCG:   %[[THIS_ALLOCA:.*]]  = alloca ptr
+// OGCG:   store ptr %[[THIS_ARG]], ptr %[[THIS_ALLOCA]]
+// OGCG:   %[[THIS:.*]] = load ptr, ptr %[[THIS_ALLOCA]]
+// OGCG:   %[[PTR_A:.*]] = getelementptr inbounds nuw %[[REC_LAM_A]], ptr %[[THIS]], i32 0, i32 0
+// OGCG:   %[[A_A_ADDR:.*]] = getelementptr inbounds nuw %struct.A, ptr %[[PTR_A]], i32 0, i32 0
+// OGCG:   %[[A_A:.*]] = load i32, ptr %[[A_A_ADDR]]
+// OGCG:   ret i32 %[[A_A]]
+
+// OGCG: define linkonce_odr noundef i32 @_ZZN1A3barEvENKUlvE_clEv(ptr {{.*}} %[[THIS_ARG:.*]])
+// OGCG:   %[[THIS_ALLOCA:.*]]  = alloca ptr
+// OGCG:   store ptr %[[THIS_ARG]], ptr %[[THIS_ALLOCA]]
+// OGCG:   %[[THIS:.*]] = load ptr, ptr %[[THIS_ALLOCA]]
+// OGCG:   %[[A_ADDR_ADDR:.*]] = getelementptr inbounds nuw %[[REC_LAM_PTR_A]], ptr %[[THIS]], i32 0, i32 0
+// OGCG:   %[[A_ADDR:.*]] = load ptr, ptr %[[A_ADDR_ADDR]]
+// OGCG:   %[[A_A_ADDR:.*]] = getelementptr inbounds nuw %struct.A, ptr %[[A_ADDR]], i32 0, i32 0
+// OGCG:   %[[A_A:.*]] = load i32, ptr %[[A_A_ADDR]]
+// OGCG:   ret i32 %[[A_A]]
+
+// Driver: exercises both `this`-capturing lambda forms through member
+// function calls on a concrete A instance.
+int test_lambda_this1(){
+  struct A clsA;
+  int x = clsA.foo();
+  int y = clsA.bar();
+  return x+y;
+}
+
+// CIR: cir.func {{.*}} @_Z17test_lambda_this1v
+// CIR:   cir.call @_ZN1AC1Ev(%[[A_THIS:.*]]){{.*}} : (!cir.ptr<!rec_A>) -> ()
+// CIR:   cir.call @_ZN1A3fooEv(%[[A_THIS]]){{.*}} : (!cir.ptr<!rec_A>) -> !s32i
+// CIR:   cir.call @_ZN1A3barEv(%[[A_THIS]]){{.*}} : (!cir.ptr<!rec_A>) -> !s32i
+
+// LLVM: define {{.*}} i32 @_Z17test_lambda_this1v
+// LLVM:   %[[A_THIS:.*]] = alloca %struct.A
+// LLVM:   call void @_ZN1AC1Ev(ptr %[[A_THIS]])
+// LLVM:   call i32 @_ZN1A3fooEv(ptr %[[A_THIS]])
+// LLVM:   call i32 @_ZN1A3barEv(ptr %[[A_THIS]])
+
+// The function above is defined before the lambda operator() in foo() in OGCG, see above.


        


More information about the cfe-commits mailing list