[clang] [clang][NFC] Create CodeGenShared component for shared code with ClangIR (PR #157936)
Amr Hesham via cfe-commits
cfe-commits at lists.llvm.org
Sun Sep 21 10:57:06 PDT 2025
https://github.com/AmrDeveloper updated https://github.com/llvm/llvm-project/pull/157936
>From e9b8c1220b2b42162dd7a654b1bc9ed6182bca12 Mon Sep 17 00:00:00 2001
From: AmrDeveloper <amr96 at programmer.net>
Date: Sun, 21 Sep 2025 13:04:32 +0200
Subject: [PATCH] [CIR] Upstream RTTI Builder & RTTI for VTable Definitions
---
clang/lib/CIR/CodeGen/CIRGenBuilder.h | 5 +
clang/lib/CIR/CodeGen/CIRGenCXXABI.h | 3 +
clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp | 976 ++++++++++++++++++
clang/lib/CIR/CodeGen/CIRGenModule.cpp | 49 +-
clang/lib/CIR/CodeGen/CIRGenModule.h | 29 +
clang/lib/CIR/CodeGen/CIRGenVTables.cpp | 43 +
clang/lib/CIR/CodeGen/CIRGenVTables.h | 2 +
.../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 10 +-
clang/test/CIR/CodeGen/vtable-rtti.cpp | 503 +++++++++
9 files changed, 1614 insertions(+), 6 deletions(-)
create mode 100644 clang/test/CIR/CodeGen/vtable-rtti.cpp
diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h
index 6a1746a7ad0ac..b76a15ded641b 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h
+++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h
@@ -89,6 +89,11 @@ class CIRGenBuilderTy : public cir::CIRBaseBuilderTy {
return cir::ConstRecordAttr::get(sTy, arrayAttr);
}
+ cir::TypeInfoAttr getTypeInfo(mlir::ArrayAttr fieldsAttr) {
+ auto anonRecord = getAnonConstRecord(fieldsAttr);
+ return cir::TypeInfoAttr::get(anonRecord.getType(), fieldsAttr);
+ }
+
std::string getUniqueAnonRecordName() { return getUniqueRecordName("anon"); }
std::string getUniqueRecordName(const std::string &baseName) {
diff --git a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h
index ae922599809b8..1dee77425c30d 100644
--- a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h
+++ b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h
@@ -114,6 +114,9 @@ class CIRGenCXXABI {
virtual void emitRethrow(CIRGenFunction &cgf, bool isNoReturn) = 0;
+ virtual mlir::Attribute getAddrOfRTTIDescriptor(mlir::Location loc,
+ QualType ty) = 0;
+
/// Get the type of the implicit "this" parameter used by a method. May return
/// zero if no specific type is applicable, e.g. if the ABI expects the "this"
/// parameter to point to some artificial offset in a complete object due to
diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp
index 0bf6cf556787c..4710ace5ef6ba 100644
--- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp
@@ -103,6 +103,9 @@ class CIRGenItaniumCXXABI : public CIRGenCXXABI {
const CXXRecordDecl *rd) override;
void emitVirtualInheritanceTables(const CXXRecordDecl *rd) override;
+ mlir::Attribute getAddrOfRTTIDescriptor(mlir::Location loc,
+ QualType ty) override;
+
bool doStructorsInitializeVPtrs(const CXXRecordDecl *vtableClass) override {
return true;
}
@@ -424,6 +427,979 @@ void CIRGenItaniumCXXABI::emitVirtualInheritanceTables(
vtables.emitVTTDefinition(vtt, cgm.getVTableLinkage(rd), rd);
}
+namespace {
+class CIRGenItaniumRTTIBuilder {
+ CIRGenModule &cgm; // Per-module state.
+ const CIRGenItaniumCXXABI &cxxABI; // Per-module state.
+
+ /// The fields of the RTTI descriptor currently being built.
+ SmallVector<mlir::Attribute, 16> fields;
+
+ // Returns the mangled type name of the given type.
+ cir::GlobalOp getAddrOfTypeName(mlir::Location loc, QualType ty,
+ cir::GlobalLinkageKind linkage);
+
+ /// Get the address of an RTTI descriptor defined outside this TU.
+ mlir::Attribute getAddrOfExternalRTTIDescriptor(mlir::Location loc,
+ QualType Ty);
+
+ /// Build the vtable pointer for the given type.
+ void buildVTablePointer(mlir::Location loc, const Type *ty);
+
+ /// Build an abi::__si_class_type_info, used for single inheritance, according
+ /// to the Itanium C++ ABI, 2.9.5p6b.
+ void buildSIClassTypeInfo(mlir::Location loc, const CXXRecordDecl *rd);
+
+ /// Build an abi::__vmi_class_type_info, used for
+ /// classes with bases that do not satisfy the abi::__si_class_type_info
+ /// constraints, according to the Itanium C++ ABI, 2.9.5p5c.
+ void buildVMIClassTypeInfo(mlir::Location loc, const CXXRecordDecl *rd);
+
+public:
+ CIRGenItaniumRTTIBuilder(const CIRGenItaniumCXXABI &abi, CIRGenModule &_cgm)
+ : cgm(_cgm), cxxABI(abi) {}
+
+ /// Build the RTTI type info struct for the given type, or
+ /// link to an existing RTTI descriptor if one already exists.
+ mlir::Attribute buildTypeInfo(mlir::Location loc, QualType ty);
+
+ /// Build the RTTI type info struct for the given type.
+ mlir::Attribute buildTypeInfo(mlir::Location loc, QualType ty,
+ cir::GlobalLinkageKind linkage,
+ mlir::SymbolTable::Visibility visibility);
+};
+} // namespace
+
+// TODO(cir): Will be removed after sharing them with the classical codegen
+namespace {
+
+// Pointer type info flags.
+enum {
+ /// PTI_Const - Type has const qualifier.
+ PTI_Const = 0x1,
+
+ /// PTI_Volatile - Type has volatile qualifier.
+ PTI_Volatile = 0x2,
+
+ /// PTI_Restrict - Type has restrict qualifier.
+ PTI_Restrict = 0x4,
+
+ /// PTI_Incomplete - Type is incomplete.
+ PTI_Incomplete = 0x8,
+
+ /// PTI_ContainingClassIncomplete - Containing class is incomplete.
+ /// (in pointer to member).
+ PTI_ContainingClassIncomplete = 0x10,
+
+ /// PTI_TransactionSafe - Pointee is transaction_safe function (C++ TM TS).
+ // PTI_TransactionSafe = 0x20,
+
+ /// PTI_Noexcept - Pointee is noexcept function (C++1z).
+ PTI_Noexcept = 0x40,
+};
+
+// VMI type info flags.
+enum {
+ /// VMI_NonDiamondRepeat - Class has non-diamond repeated inheritance.
+ VMI_NonDiamondRepeat = 0x1,
+
+ /// VMI_DiamondShaped - Class is diamond shaped.
+ VMI_DiamondShaped = 0x2
+};
+
+// Base class type info flags.
+enum {
+ /// BCTI_Virtual - Base class is virtual.
+ BCTI_Virtual = 0x1,
+
+ /// BCTI_Public - Base class is public.
+ BCTI_Public = 0x2
+};
+
+/// Given a builtin type, returns whether the type
+/// info for that type is defined in the standard library.
+/// TODO(cir): this can be unified with LLVM codegen
+static bool TypeInfoIsInStandardLibrary(const BuiltinType *Ty) {
+ // Itanium C++ ABI 2.9.2:
+ // Basic type information (e.g. for "int", "bool", etc.) will be kept in
+ // the run-time support library. Specifically, the run-time support
+ // library should contain type_info objects for the types X, X* and
+ // X const*, for every X in: void, std::nullptr_t, bool, wchar_t, char,
+ // unsigned char, signed char, short, unsigned short, int, unsigned int,
+ // long, unsigned long, long long, unsigned long long, float, double,
+ // long double, char16_t, char32_t, and the IEEE 754r decimal and
+ // half-precision floating point types.
+ //
+ // GCC also emits RTTI for __int128.
+ // FIXME: We do not emit RTTI information for decimal types here.
+
+ // Types added here must also be added to EmitFundamentalRTTIDescriptors.
+ switch (Ty->getKind()) {
+ case BuiltinType::WasmExternRef:
+ case BuiltinType::HLSLResource:
+ llvm_unreachable("NYI");
+ case BuiltinType::Void:
+ case BuiltinType::NullPtr:
+ case BuiltinType::Bool:
+ case BuiltinType::WChar_S:
+ case BuiltinType::WChar_U:
+ case BuiltinType::Char_U:
+ case BuiltinType::Char_S:
+ case BuiltinType::UChar:
+ case BuiltinType::SChar:
+ case BuiltinType::Short:
+ case BuiltinType::UShort:
+ case BuiltinType::Int:
+ case BuiltinType::UInt:
+ case BuiltinType::Long:
+ case BuiltinType::ULong:
+ case BuiltinType::LongLong:
+ case BuiltinType::ULongLong:
+ case BuiltinType::Half:
+ case BuiltinType::Float:
+ case BuiltinType::Double:
+ case BuiltinType::LongDouble:
+ case BuiltinType::Float16:
+ case BuiltinType::Float128:
+ case BuiltinType::Ibm128:
+ case BuiltinType::Char8:
+ case BuiltinType::Char16:
+ case BuiltinType::Char32:
+ case BuiltinType::Int128:
+ case BuiltinType::UInt128:
+ return true;
+
+#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
+ case BuiltinType::Id:
+#include "clang/Basic/OpenCLImageTypes.def"
+#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) case BuiltinType::Id:
+#include "clang/Basic/OpenCLExtensionTypes.def"
+ case BuiltinType::OCLSampler:
+ case BuiltinType::OCLEvent:
+ case BuiltinType::OCLClkEvent:
+ case BuiltinType::OCLQueue:
+ case BuiltinType::OCLReserveID:
+#define SVE_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
+#include "clang/Basic/AArch64ACLETypes.def"
+#define PPC_VECTOR_TYPE(Name, Id, Size) case BuiltinType::Id:
+#include "clang/Basic/PPCTypes.def"
+#define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
+#include "clang/Basic/RISCVVTypes.def"
+#define AMDGPU_TYPE(Name, Id, SingletonId, Width, Align) case BuiltinType::Id:
+#include "clang/Basic/AMDGPUTypes.def"
+ case BuiltinType::ShortAccum:
+ case BuiltinType::Accum:
+ case BuiltinType::LongAccum:
+ case BuiltinType::UShortAccum:
+ case BuiltinType::UAccum:
+ case BuiltinType::ULongAccum:
+ case BuiltinType::ShortFract:
+ case BuiltinType::Fract:
+ case BuiltinType::LongFract:
+ case BuiltinType::UShortFract:
+ case BuiltinType::UFract:
+ case BuiltinType::ULongFract:
+ case BuiltinType::SatShortAccum:
+ case BuiltinType::SatAccum:
+ case BuiltinType::SatLongAccum:
+ case BuiltinType::SatUShortAccum:
+ case BuiltinType::SatUAccum:
+ case BuiltinType::SatULongAccum:
+ case BuiltinType::SatShortFract:
+ case BuiltinType::SatFract:
+ case BuiltinType::SatLongFract:
+ case BuiltinType::SatUShortFract:
+ case BuiltinType::SatUFract:
+ case BuiltinType::SatULongFract:
+ case BuiltinType::BFloat16:
+ return false;
+
+ case BuiltinType::Dependent:
+#define BUILTIN_TYPE(Id, SingletonId)
+#define PLACEHOLDER_TYPE(Id, SingletonId) case BuiltinType::Id:
+#include "clang/AST/BuiltinTypes.def"
+ llvm_unreachable("asking for RRTI for a placeholder type!");
+
+ case BuiltinType::ObjCId:
+ case BuiltinType::ObjCClass:
+ case BuiltinType::ObjCSel:
+ llvm_unreachable("FIXME: Objective-C types are unsupported!");
+ }
+
+ llvm_unreachable("Invalid BuiltinType Kind!");
+}
+
+static bool TypeInfoIsInStandardLibrary(const PointerType *PointerTy) {
+ QualType PointeeTy = PointerTy->getPointeeType();
+ const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(PointeeTy);
+ if (!BuiltinTy)
+ return false;
+
+ // Check the qualifiers.
+ Qualifiers Quals = PointeeTy.getQualifiers();
+ Quals.removeConst();
+
+ if (!Quals.empty())
+ return false;
+
+ return TypeInfoIsInStandardLibrary(BuiltinTy);
+}
+
+/// IsStandardLibraryRTTIDescriptor - Returns whether the type
+/// information for the given type exists in the standard library.
+static bool IsStandardLibraryRTTIDescriptor(QualType Ty) {
+ // Type info for builtin types is defined in the standard library.
+ if (const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(Ty))
+ return TypeInfoIsInStandardLibrary(BuiltinTy);
+
+ // Type info for some pointer types to builtin types is defined in the
+ // standard library.
+ if (const PointerType *PointerTy = dyn_cast<PointerType>(Ty))
+ return TypeInfoIsInStandardLibrary(PointerTy);
+
+ return false;
+}
+
+/// ShouldUseExternalRTTIDescriptor - Returns whether the type information for
+/// the given type exists somewhere else, and that we should not emit the type
+/// information in this translation unit. Assumes that it is not a
+/// standard-library type.
+static bool ShouldUseExternalRTTIDescriptor(CIRGenModule &CGM, QualType Ty) {
+ ASTContext &Context = CGM.getASTContext();
+
+ // If RTTI is disabled, assume it might be disabled in the
+ // translation unit that defines any potential key function, too.
+ if (!Context.getLangOpts().RTTI)
+ return false;
+
+ if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
+ const CXXRecordDecl *RD =
+ cast<CXXRecordDecl>(RecordTy->getOriginalDecl())->getDefinitionOrSelf();
+ if (!RD->hasDefinition())
+ return false;
+
+ if (!RD->isDynamicClass())
+ return false;
+
+ // FIXME: this may need to be reconsidered if the key function
+ // changes.
+ // N.B. We must always emit the RTTI data ourselves if there exists a key
+ // function.
+ bool IsDLLImport = RD->hasAttr<DLLImportAttr>();
+
+ // Don't import the RTTI but emit it locally.
+ if (CGM.getTriple().isOSCygMing())
+ return false;
+
+ if (CGM.getVTables().isVTableExternal(RD)) {
+ if (CGM.getTarget().hasPS4DLLImportExport())
+ return true;
+
+ return IsDLLImport && !CGM.getTriple().isWindowsItaniumEnvironment()
+ ? false
+ : true;
+ }
+ if (IsDLLImport)
+ return true;
+ }
+
+ return false;
+}
+
+namespace {
+/// Contains virtual and non-virtual bases seen when traversing a class
+/// hierarchy.
+struct SeenBases {
+ llvm::SmallPtrSet<const CXXRecordDecl *, 16> NonVirtualBases;
+ llvm::SmallPtrSet<const CXXRecordDecl *, 16> VirtualBases;
+};
+} // namespace
+
+/// Compute the value of the flags member in abi::__vmi_class_type_info.
+///
+static unsigned ComputeVMIClassTypeInfoFlags(const CXXBaseSpecifier *Base,
+ SeenBases &Bases) {
+
+ unsigned Flags = 0;
+ auto *BaseDecl = Base->getType()->castAsCXXRecordDecl();
+
+ if (Base->isVirtual()) {
+ // Mark the virtual base as seen.
+ if (!Bases.VirtualBases.insert(BaseDecl).second) {
+ // If this virtual base has been seen before, then the class is diamond
+ // shaped.
+ Flags |= VMI_DiamondShaped;
+ } else {
+ if (Bases.NonVirtualBases.count(BaseDecl))
+ Flags |= VMI_NonDiamondRepeat;
+ }
+ } else {
+ // Mark the non-virtual base as seen.
+ if (!Bases.NonVirtualBases.insert(BaseDecl).second) {
+ // If this non-virtual base has been seen before, then the class has non-
+ // diamond shaped repeated inheritance.
+ Flags |= VMI_NonDiamondRepeat;
+ } else {
+ if (Bases.VirtualBases.count(BaseDecl))
+ Flags |= VMI_NonDiamondRepeat;
+ }
+ }
+
+ // Walk all bases.
+ for (const auto &I : BaseDecl->bases())
+ Flags |= ComputeVMIClassTypeInfoFlags(&I, Bases);
+
+ return Flags;
+}
+
+static unsigned ComputeVMIClassTypeInfoFlags(const CXXRecordDecl *RD) {
+ unsigned Flags = 0;
+ SeenBases Bases;
+
+ // Walk all bases.
+ for (const auto &I : RD->bases())
+ Flags |= ComputeVMIClassTypeInfoFlags(&I, Bases);
+
+ return Flags;
+}
+
+// Return whether the given record decl has a "single,
+// public, non-virtual base at offset zero (i.e. the derived class is dynamic
+// iff the base is)", according to Itanium C++ ABI, 2.9.5p6b.
+// TODO(cir): this can be unified with LLVM codegen
+static bool CanUseSingleInheritance(const CXXRecordDecl *RD) {
+ // Check the number of bases.
+ if (RD->getNumBases() != 1)
+ return false;
+
+ // Get the base.
+ CXXRecordDecl::base_class_const_iterator Base = RD->bases_begin();
+
+ // Check that the base is not virtual.
+ if (Base->isVirtual())
+ return false;
+
+ // Check that the base is public.
+ if (Base->getAccessSpecifier() != AS_public)
+ return false;
+
+ // Check that the class is dynamic iff the base is.
+ auto *BaseDecl = Base->getType()->castAsCXXRecordDecl();
+ if (!BaseDecl->isEmpty() &&
+ BaseDecl->isDynamicClass() != RD->isDynamicClass())
+ return false;
+
+ return true;
+}
+
+/// IsIncompleteClassType - Returns whether the given record type is incomplete.
+static bool IsIncompleteClassType(const RecordType *recordTy) {
+ return !recordTy->getOriginalDecl()
+ ->getDefinitionOrSelf()
+ ->isCompleteDefinition();
+}
+
+/// Returns whether the given type contains an
+/// incomplete class type. This is true if
+///
+/// * The given type is an incomplete class type.
+/// * The given type is a pointer type whose pointee type contains an
+/// incomplete class type.
+/// * The given type is a member pointer type whose class is an incomplete
+/// class type.
+/// * The given type is a member pointer type whose pointee type contains an
+/// incomplete class type.
+/// i.e. the type is an indirect or direct pointer to an incomplete class type.
+static bool ContainsIncompleteClassType(QualType ty) {
+ if (const auto *RecordTy = dyn_cast<RecordType>(ty)) {
+ if (IsIncompleteClassType(RecordTy))
+ return true;
+ }
+
+ if (const auto *PointerTy = dyn_cast<PointerType>(ty))
+ return ContainsIncompleteClassType(PointerTy->getPointeeType());
+
+ if (const auto *MemberPointerTy = dyn_cast<MemberPointerType>(ty)) {
+ // Check if the class type is incomplete.
+ if (!MemberPointerTy->getMostRecentCXXRecordDecl()->hasDefinition())
+ return true;
+
+ return ContainsIncompleteClassType(MemberPointerTy->getPointeeType());
+ }
+
+ return false;
+}
+
+/// Return the linkage that the type info and type info name constants
+/// should have for the given type.
+static cir::GlobalLinkageKind getTypeInfoLinkage(CIRGenModule &cgm,
+ QualType ty) {
+ // In addition, it and all of the intermediate abi::__pointer_type_info
+ // structs in the chain down to the abi::__class_type_info for the
+ // incomplete class type must be prevented from resolving to the
+ // corresponding type_info structs for the complete class type, possibly
+ // by making them local static objects. Finally, a dummy class RTTI is
+ // generated for the incomplete type that will not resolve to the final
+ // complete class RTTI (because the latter need not exist), possibly by
+ // making it a local static object.
+ if (ContainsIncompleteClassType(ty))
+ return cir::GlobalLinkageKind::InternalLinkage;
+
+ switch (ty->getLinkage()) {
+ case Linkage::Invalid:
+ llvm_unreachable("Linkage hasn't been computed!");
+
+ case Linkage::None:
+ case Linkage::Internal:
+ case Linkage::UniqueExternal:
+ return cir::GlobalLinkageKind::InternalLinkage;
+
+ case Linkage::VisibleNone:
+ case Linkage::Module:
+ case Linkage::External:
+ // RTTI is not enabled, which means that this type info struct is going
+ // to be used for exception handling. Give it linkonce_odr linkage.
+ if (!cgm.getLangOpts().RTTI)
+ return cir::GlobalLinkageKind::LinkOnceODRLinkage;
+
+ if (const RecordType *record = dyn_cast<RecordType>(ty)) {
+ const CXXRecordDecl *rd =
+ cast<CXXRecordDecl>(record->getOriginalDecl())->getDefinitionOrSelf();
+ if (rd->hasAttr<WeakAttr>())
+ return cir::GlobalLinkageKind::WeakODRLinkage;
+
+ if (cgm.getTriple().isWindowsItaniumEnvironment())
+ if (rd->hasAttr<DLLImportAttr>() &&
+ ShouldUseExternalRTTIDescriptor(cgm, ty))
+ return cir::GlobalLinkageKind::ExternalLinkage;
+
+ // MinGW always uses LinkOnceODRLinkage for type info.
+ if (rd->isDynamicClass() && !cgm.getASTContext()
+ .getTargetInfo()
+ .getTriple()
+ .isWindowsGNUEnvironment())
+ return cgm.getVTableLinkage(rd);
+ }
+
+ return cir::GlobalLinkageKind::LinkOnceODRLinkage;
+ }
+
+ llvm_unreachable("Invalid linkage!");
+}
+} // namespace
+
+// FIXME(cir): audit this against the classic LLVM codegen implementation.
+cir::GlobalOp
+CIRGenItaniumRTTIBuilder::getAddrOfTypeName(mlir::Location loc, QualType ty,
+ cir::GlobalLinkageKind linkage) {
+ auto &builder = cgm.getBuilder();
+ SmallString<256> Name;
+ llvm::raw_svector_ostream Out(Name);
+ cgm.getCXXABI().getMangleContext().mangleCXXRTTIName(ty, Out);
+
+ // We know that the mangled name of the type starts at index 4 of the
+ // mangled name of the typename, so we can just index into it in order to
+ // get the mangled name of the type.
+ auto Init = builder.getString(Name.substr(4),
+ cgm.convertType(cgm.getASTContext().CharTy),
+ Name.substr(4).size());
+
+ auto Align =
+ cgm.getASTContext().getTypeAlignInChars(cgm.getASTContext().CharTy);
+
+ // builder.getString can return a #cir.zero if the string given to it only
+ // contains null bytes. However, type names cannot be full of null bytes.
+ // So casting Init to a ConstArrayAttr should be safe.
+ auto InitStr = cast<cir::ConstArrayAttr>(Init);
+
+ cir::GlobalOp GV = cgm.createOrReplaceCXXRuntimeVariable(
+ loc, Name, InitStr.getType(), linkage, Align);
+ CIRGenModule::setInitializer(GV, Init);
+ return GV;
+}
+
+mlir::Attribute
+CIRGenItaniumRTTIBuilder::getAddrOfExternalRTTIDescriptor(mlir::Location loc,
+ QualType Ty) {
+ // Mangle the RTTI name.
+ SmallString<256> Name;
+ llvm::raw_svector_ostream Out(Name);
+ cgm.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
+ auto &builder = cgm.getBuilder();
+
+ // Look for an existing global.
+ auto GV = dyn_cast_or_null<cir::GlobalOp>(
+ mlir::SymbolTable::lookupSymbolIn(cgm.getModule(), Name));
+
+ if (!GV) {
+ // Create a new global variable.
+ // From LLVM codegen => Note for the future: If we would ever like to do
+ // deferred emission of RTTI, check if emitting vtables opportunistically
+ // need any adjustment.
+ GV = CIRGenModule::createGlobalOp(cgm, loc, Name, builder.getUInt8PtrTy(),
+ /*isConstant=*/true);
+ const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
+ cgm.setGVProperties(GV, RD);
+
+ // Import the typeinfo symbol when all non-inline virtual methods are
+ // imported.
+ if (cgm.getTarget().hasPS4DLLImportExport())
+ llvm_unreachable("NYI");
+ }
+
+ return builder.getGlobalViewAttr(builder.getUInt8PtrTy(), GV);
+}
+
+// FIXME: Split this function
+void CIRGenItaniumRTTIBuilder::buildVTablePointer(mlir::Location loc,
+ const Type *Ty) {
+ auto &builder = cgm.getBuilder();
+
+ // abi::__class_type_info.
+ static const char *const ClassTypeInfo =
+ "_ZTVN10__cxxabiv117__class_type_infoE";
+ // abi::__si_class_type_info.
+ static const char *const SIClassTypeInfo =
+ "_ZTVN10__cxxabiv120__si_class_type_infoE";
+ // abi::__vmi_class_type_info.
+ static const char *const VMIClassTypeInfo =
+ "_ZTVN10__cxxabiv121__vmi_class_type_infoE";
+
+ const char *VTableName = nullptr;
+
+ switch (Ty->getTypeClass()) {
+ case Type::ArrayParameter:
+ case Type::HLSLAttributedResource:
+ case Type::HLSLInlineSpirv:
+ llvm_unreachable("NYI");
+#define TYPE(Class, Base)
+#define ABSTRACT_TYPE(Class, Base)
+#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
+#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
+#define DEPENDENT_TYPE(Class, Base) case Type::Class:
+#include "clang/AST/TypeNodes.inc"
+ llvm_unreachable("Non-canonical and dependent types shouldn't get here");
+
+ case Type::LValueReference:
+ case Type::RValueReference:
+ llvm_unreachable("References shouldn't get here");
+
+ case Type::Auto:
+ case Type::DeducedTemplateSpecialization:
+ llvm_unreachable("Undeduced type shouldn't get here");
+
+ case Type::Pipe:
+ llvm_unreachable("Pipe types shouldn't get here");
+
+ case Type::Builtin:
+ case Type::BitInt:
+ // GCC treats vector and complex types as fundamental types.
+ case Type::Vector:
+ case Type::ExtVector:
+ case Type::ConstantMatrix:
+ case Type::Complex:
+ case Type::Atomic:
+ // FIXME: GCC treats block pointers as fundamental types?!
+ case Type::BlockPointer:
+ // abi::__fundamental_type_info.
+ VTableName = "_ZTVN10__cxxabiv123__fundamental_type_infoE";
+ break;
+
+ case Type::ConstantArray:
+ case Type::IncompleteArray:
+ case Type::VariableArray:
+ // abi::__array_type_info.
+ VTableName = "_ZTVN10__cxxabiv117__array_type_infoE";
+ break;
+
+ case Type::FunctionNoProto:
+ case Type::FunctionProto:
+ // abi::__function_type_info.
+ VTableName = "_ZTVN10__cxxabiv120__function_type_infoE";
+ break;
+
+ case Type::Enum:
+ // abi::__enum_type_info.
+ VTableName = "_ZTVN10__cxxabiv116__enum_type_infoE";
+ break;
+
+ case Type::Record: {
+ const CXXRecordDecl *RD =
+ cast<CXXRecordDecl>(cast<RecordType>(Ty)->getOriginalDecl())
+ ->getDefinitionOrSelf();
+
+ if (!RD->hasDefinition() || !RD->getNumBases()) {
+ VTableName = ClassTypeInfo;
+ } else if (CanUseSingleInheritance(RD)) {
+ VTableName = SIClassTypeInfo;
+ } else {
+ VTableName = VMIClassTypeInfo;
+ }
+
+ break;
+ }
+
+ case Type::ObjCObject:
+ // Ignore protocol qualifiers.
+ Ty = cast<ObjCObjectType>(Ty)->getBaseType().getTypePtr();
+
+ // Handle id and Class.
+ if (isa<BuiltinType>(Ty)) {
+ VTableName = ClassTypeInfo;
+ break;
+ }
+
+ assert(isa<ObjCInterfaceType>(Ty));
+ [[fallthrough]];
+
+ case Type::ObjCInterface:
+ if (cast<ObjCInterfaceType>(Ty)->getDecl()->getSuperClass()) {
+ VTableName = SIClassTypeInfo;
+ } else {
+ VTableName = ClassTypeInfo;
+ }
+ break;
+
+ case Type::ObjCObjectPointer:
+ case Type::Pointer:
+ // abi::__pointer_type_info.
+ VTableName = "_ZTVN10__cxxabiv119__pointer_type_infoE";
+ break;
+
+ case Type::MemberPointer:
+ // abi::__pointer_to_member_type_info.
+ VTableName = "_ZTVN10__cxxabiv129__pointer_to_member_type_infoE";
+ break;
+ }
+
+ cir::GlobalOp VTable{};
+
+ // Check if the alias exists. If it doesn't, then get or create the global.
+ if (cgm.getItaniumVTableContext().isRelativeLayout())
+ llvm_unreachable("NYI");
+ if (!VTable) {
+ VTable = cgm.getOrInsertGlobal(loc, VTableName,
+ cgm.getBuilder().getUInt8PtrTy());
+ }
+
+ // The vtable address point is 2.
+ mlir::Attribute field{};
+ if (cgm.getItaniumVTableContext().isRelativeLayout()) {
+ llvm_unreachable("NYI");
+ } else {
+ SmallVector<mlir::Attribute, 4> offsets{
+ cgm.getBuilder().getI32IntegerAttr(2)};
+ auto indices = mlir::ArrayAttr::get(builder.getContext(), offsets);
+ field = cgm.getBuilder().getGlobalViewAttr(cgm.getBuilder().getUInt8PtrTy(),
+ VTable, indices);
+ }
+
+ assert(field && "expected attribute");
+ fields.push_back(field);
+}
+
+/// Build an abi::__si_class_type_info, used for single inheritance, according
+/// to the Itanium C++ ABI, 2.9.5p6b.
+void CIRGenItaniumRTTIBuilder::buildSIClassTypeInfo(mlir::Location loc,
+ const CXXRecordDecl *rd) {
+ // Itanium C++ ABI 2.9.5p6b:
+ // It adds to abi::__class_type_info a single member pointing to the
+ // type_info structure for the base type,
+ auto baseTypeInfo = CIRGenItaniumRTTIBuilder(cxxABI, cgm)
+ .buildTypeInfo(loc, rd->bases_begin()->getType());
+ fields.push_back(baseTypeInfo);
+}
+
+/// Build an abi::__vmi_class_type_info, used for
+/// classes with bases that do not satisfy the abi::__si_class_type_info
+/// constraints, according to the Itanium C++ ABI, 2.9.5p5c.
+void CIRGenItaniumRTTIBuilder::buildVMIClassTypeInfo(mlir::Location loc,
+ const CXXRecordDecl *RD) {
+ mlir::Type UnsignedIntLTy =
+ cgm.convertType(cgm.getASTContext().UnsignedIntTy);
+
+ // Itanium C++ ABI 2.9.5p6c:
+ // __flags is a word with flags describing details about the class
+ // structure, which may be referenced by using the __flags_masks
+ // enumeration. These flags refer to both direct and indirect bases.
+ unsigned flags = ComputeVMIClassTypeInfoFlags(RD);
+ fields.push_back(cir::IntAttr::get(UnsignedIntLTy, flags));
+
+ // Itanium C++ ABI 2.9.5p6c:
+ // __base_count is a word with the number of direct proper base class
+ // descriptions that follow.
+ fields.push_back(cir::IntAttr::get(UnsignedIntLTy, RD->getNumBases()));
+
+ if (!RD->getNumBases())
+ return;
+
+ // Now add the base class descriptions.
+
+ // Itanium C++ ABI 2.9.5p6c:
+ // __base_info[] is an array of base class descriptions -- one for every
+ // direct proper base. Each description is of the type:
+ //
+ // struct abi::__base_class_type_info {
+ // public:
+ // const __class_type_info *__base_type;
+ // long __offset_flags;
+ //
+ // enum __offset_flags_masks {
+ // __virtual_mask = 0x1,
+ // __public_mask = 0x2,
+ // __offset_shift = 8
+ // };
+ // };
+
+ // If we're in mingw and 'long' isn't wide enough for a pointer, use 'long
+ // long' instead of 'long' for __offset_flags. libstdc++abi uses long long on
+ // LLP64 platforms.
+ // FIXME: Consider updating libc++abi to match, and extend this logic to all
+ // LLP64 platforms.
+ QualType OffsetFlagsTy = cgm.getASTContext().LongTy;
+ const TargetInfo &TI = cgm.getASTContext().getTargetInfo();
+ if (TI.getTriple().isOSCygMing() &&
+ TI.getPointerWidth(LangAS::Default) > TI.getLongWidth())
+ OffsetFlagsTy = cgm.getASTContext().LongLongTy;
+ mlir::Type OffsetFlagsLTy = cgm.convertType(OffsetFlagsTy);
+
+ for (const auto &Base : RD->bases()) {
+ // The __base_type member points to the RTTI for the base type.
+ fields.push_back(CIRGenItaniumRTTIBuilder(cxxABI, cgm)
+ .buildTypeInfo(loc, Base.getType()));
+
+ auto *BaseDecl = Base.getType()->castAsCXXRecordDecl();
+ int64_t OffsetFlags = 0;
+
+ // All but the lower 8 bits of __offset_flags are a signed offset.
+ // For a non-virtual base, this is the offset in the object of the base
+ // subobject. For a virtual base, this is the offset in the virtual table of
+ // the virtual base offset for the virtual base referenced (negative).
+ CharUnits Offset;
+ if (Base.isVirtual())
+ Offset = cgm.getItaniumVTableContext().getVirtualBaseOffsetOffset(
+ RD, BaseDecl);
+ else {
+ const ASTRecordLayout &Layout =
+ cgm.getASTContext().getASTRecordLayout(RD);
+ Offset = Layout.getBaseClassOffset(BaseDecl);
+ }
+ OffsetFlags = uint64_t(Offset.getQuantity()) << 8;
+
+ // The low-order byte of __offset_flags contains flags, as given by the
+ // masks from the enumeration __offset_flags_masks.
+ if (Base.isVirtual())
+ OffsetFlags |= BCTI_Virtual;
+ if (Base.getAccessSpecifier() == AS_public)
+ OffsetFlags |= BCTI_Public;
+
+ fields.push_back(cir::IntAttr::get(OffsetFlagsLTy, OffsetFlags));
+ }
+}
+
+mlir::Attribute CIRGenItaniumRTTIBuilder::buildTypeInfo(mlir::Location loc,
+ QualType ty) {
+ // We want to operate on the canonical type.
+ ty = ty.getCanonicalType();
+
+ // Check if we've already emitted an RTTI descriptor for this type.
+ SmallString<256> name;
+ llvm::raw_svector_ostream out(name);
+ cgm.getCXXABI().getMangleContext().mangleCXXRTTI(ty, out);
+
+ auto oldGV = dyn_cast_or_null<cir::GlobalOp>(
+ mlir::SymbolTable::lookupSymbolIn(cgm.getModule(), name));
+
+ if (oldGV && !oldGV.isDeclaration()) {
+ assert(!oldGV.hasAvailableExternallyLinkage() &&
+ "available_externally typeinfos not yet implemented");
+ return cgm.getBuilder().getGlobalViewAttr(cgm.getBuilder().getUInt8PtrTy(),
+ oldGV);
+ }
+
+ // FIXME(cir): verify this early-exit matches classic codegen behavior.
+ // Check if there is already an external RTTI descriptor for this type.
+ if (IsStandardLibraryRTTIDescriptor(ty) ||
+ ShouldUseExternalRTTIDescriptor(cgm, ty))
+ return getAddrOfExternalRTTIDescriptor(loc, ty);
+
+ // Emit the standard library with external linkage.
+ cir::GlobalLinkageKind linkage = getTypeInfoLinkage(cgm, ty);
+
+ // Give the type_info object and name the formal visibility of the
+ // type itself.
+
+ // FIXME(cir): verify the visibility computation matches classic codegen.
+ mlir::SymbolTable::Visibility symVisibility;
+ if (cir::isLocalLinkage(linkage))
+ // If the linkage is local, only default visibility makes sense.
+ symVisibility = mlir::SymbolTable::Visibility::Public;
+ else
+ symVisibility = CIRGenModule::getCIRVisibility(ty->getVisibility());
+
+ return buildTypeInfo(loc, ty, linkage, symVisibility);
+}
+
+mlir::Attribute CIRGenItaniumRTTIBuilder::buildTypeInfo(
+ mlir::Location loc, QualType ty, cir::GlobalLinkageKind linkage,
+ mlir::SymbolTable::Visibility visibility) {
+ CIRGenBuilderTy &builder = cgm.getBuilder();
+
+ // Add the vtable pointer.
+ buildVTablePointer(loc, cast<Type>(ty));
+
+ // And the name.
+ cir::GlobalOp typeName = getAddrOfTypeName(loc, ty, linkage);
+ mlir::Attribute typeNameField;
+
+ // If we're supposed to demote the visibility, be sure to set a flag
+ // to use a string comparison for type_info comparisons.
+ // FIXME: RTTIUniquenessKind
+
+ typeNameField = builder.getGlobalViewAttr(builder.getUInt8PtrTy(), typeName);
+ fields.push_back(typeNameField);
+
+ switch (ty->getTypeClass()) {
+ case Type::ArrayParameter:
+ case Type::HLSLAttributedResource:
+ case Type::HLSLInlineSpirv:
+ llvm_unreachable("NYI");
+#define TYPE(Class, Base)
+#define ABSTRACT_TYPE(Class, Base)
+#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
+#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
+#define DEPENDENT_TYPE(Class, Base) case Type::Class:
+#include "clang/AST/TypeNodes.inc"
+ llvm_unreachable("Non-canonical and dependent types shouldn't get here");
+
+ // GCC treats vector types as fundamental types.
+ case Type::Builtin:
+ case Type::Vector:
+ case Type::ExtVector:
+ case Type::ConstantMatrix:
+ case Type::Complex:
+ case Type::BlockPointer:
+ // Itanium C++ ABI 2.9.5p4:
+ // abi::__fundamental_type_info adds no data members to std::type_info.
+ break;
+
+ case Type::LValueReference:
+ case Type::RValueReference:
+ llvm_unreachable("References shouldn't get here");
+
+ case Type::Auto:
+ case Type::DeducedTemplateSpecialization:
+ llvm_unreachable("Undeduced type shouldn't get here");
+
+ case Type::Pipe:
+ break;
+
+ case Type::BitInt:
+ break;
+
+ case Type::ConstantArray:
+ case Type::IncompleteArray:
+ case Type::VariableArray:
+ // Itanium C++ ABI 2.9.5p5:
+ // abi::__array_type_info adds no data members to std::type_info.
+ break;
+
+ case Type::FunctionNoProto:
+ case Type::FunctionProto:
+ // Itanium C++ ABI 2.9.5p5:
+ // abi::__function_type_info adds no data members to std::type_info.
+ break;
+
+ case Type::Enum:
+ // Itanium C++ ABI 2.9.5p5:
+ // abi::__enum_type_info adds no data members to std::type_info.
+ break;
+
+ case Type::Record: {
+ const auto *rd =
+ cast<CXXRecordDecl>(cast<RecordType>(ty)->getOriginalDecl())
+ ->getDefinitionOrSelf();
+ if (!rd->hasDefinition() || !rd->getNumBases()) {
+ // We don't need to emit any fields.
+ break;
+ }
+
+ if (CanUseSingleInheritance(rd)) {
+ buildSIClassTypeInfo(loc, rd);
+ } else {
+ buildVMIClassTypeInfo(loc, rd);
+ }
+
+ break;
+ }
+
+ case Type::ObjCObject:
+ case Type::ObjCInterface:
+ llvm_unreachable("NYI");
+ break;
+
+ case Type::ObjCObjectPointer:
+ llvm_unreachable("NYI");
+ break;
+
+ case Type::Pointer:
+ llvm_unreachable("NYI");
+ break;
+
+ case Type::MemberPointer:
+ llvm_unreachable("NYI");
+ break;
+
+ case Type::Atomic:
+ // No fields, at least for the moment.
+ break;
+ }
+
+ auto init = builder.getTypeInfo(builder.getArrayAttr(fields));
+
+ SmallString<256> Name;
+ llvm::raw_svector_ostream Out(Name);
+ cgm.getCXXABI().getMangleContext().mangleCXXRTTI(ty, Out);
+
+ // Create new global and search for an existing global.
+ auto OldGV = dyn_cast_or_null<cir::GlobalOp>(
+ mlir::SymbolTable::lookupSymbolIn(cgm.getModule(), Name));
+ cir::GlobalOp GV =
+ CIRGenModule::createGlobalOp(cgm, loc, Name, init.getType(),
+ /*isConstant=*/true);
+
+ // Export the typeinfo in the same circumstances as the vtable is
+ // exported.
+ if (cgm.getTarget().hasPS4DLLImportExport())
+ llvm_unreachable("NYI");
+
+ // If there's already an old global variable, replace it with the new one.
+ if (OldGV) {
+ // Replace occurrences of the old variable if needed.
+ GV.setName(OldGV.getName());
+ if (!OldGV->use_empty()) {
+ // TODO: replaceAllUsesWith
+ llvm_unreachable("NYI");
+ }
+ OldGV->erase();
+ }
+
+ if (cgm.supportsCOMDAT() && cir::isWeakForLinker(GV.getLinkage())) {
+ llvm_unreachable("NYI");
+ }
+
+ mlir::SymbolTable::setSymbolVisibility(
+ typeName, CIRGenModule::getMLIRVisibility(typeName));
+ CIRGenModule::setInitializer(GV, init);
+
+ return builder.getGlobalViewAttr(builder.getUInt8PtrTy(), GV);
+}
+
+mlir::Attribute CIRGenItaniumCXXABI::getAddrOfRTTIDescriptor(mlir::Location loc,
+ QualType ty) {
+ return CIRGenItaniumRTTIBuilder(*this, cgm).buildTypeInfo(loc, ty);
+}
+
void CIRGenItaniumCXXABI::emitDestructorCall(
CIRGenFunction &cgf, const CXXDestructorDecl *dd, CXXDtorType type,
bool forVirtualBase, bool delegating, Address thisAddr, QualType thisTy) {
diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp
index eef23a0ebda7f..ea150a2989c39 100644
--- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp
@@ -2171,8 +2171,53 @@ mlir::Attribute CIRGenModule::getAddrOfRTTIDescriptor(mlir::Location loc,
if (!shouldEmitRTTI(forEh))
return builder.getConstNullPtrAttr(builder.getUInt8PtrTy());
- errorNYI(loc, "getAddrOfRTTIDescriptor");
- return mlir::Attribute();
+ if (forEh && ty->isObjCObjectPointerType() &&
+ langOpts.ObjCRuntime.isGNUFamily()) {
+    errorNYI(loc, "getAddrOfRTTIDescriptor: ObjC pointer type & GNU ObjC runtime");
+ return {};
+ }
+
+ return getCXXABI().getAddrOfRTTIDescriptor(loc, ty);
+}
+
+/// TODO(cir): once we have cir.module, add this as a convenience method there.
+///
+/// Look up the specified global in the module symbol table.
+/// 1. If it does not exist, add a declaration of the global and return it.
+/// 2. Else, the global exists but has the wrong type: return the global
+/// with a constantexpr cast to the right type.
+/// 3. Finally, if the existing global is the correct declaration, return the
+/// existing global.
+cir::GlobalOp CIRGenModule::getOrInsertGlobal(
+ mlir::Location loc, StringRef name, mlir::Type ty,
+ llvm::function_ref<cir::GlobalOp()> createGlobalCallback) {
+ // See if we have a definition for the specified global already.
+ auto gv = dyn_cast_or_null<cir::GlobalOp>(getGlobalValue(name));
+ if (!gv) {
+ gv = createGlobalCallback();
+ }
+ assert(gv && "The CreateGlobalCallback is expected to create a global");
+
+ // If the variable exists but has the wrong type, return a bitcast to the
+ // right type.
+ auto gvTy = gv.getSymType();
+ assert(!cir::MissingFeatures::addressSpace());
+ auto pTy = builder.getPointerTo(ty);
+
+ if (gvTy != pTy)
+ llvm_unreachable("NYI");
+
+ // Otherwise, we just found the existing function or a prototype.
+ return gv;
+}
+
+// Overload to construct a global variable using its constructor's defaults.
+cir::GlobalOp CIRGenModule::getOrInsertGlobal(mlir::Location loc,
+ StringRef name, mlir::Type ty) {
+ return getOrInsertGlobal(loc, name, ty, [&] {
+ return CIRGenModule::createGlobalOp(*this, loc, name,
+ builder.getPointerTo(ty));
+ });
}
// TODO(cir): this can be shared with LLVM codegen.
diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h
index 95a7ac0648bb7..00c79526d28c8 100644
--- a/clang/lib/CIR/CodeGen/CIRGenModule.h
+++ b/clang/lib/CIR/CodeGen/CIRGenModule.h
@@ -148,6 +148,23 @@ class CIRGenModule : public CIRGenTypeCache {
cir::GlobalOp getOrCreateCIRGlobal(const VarDecl *d, mlir::Type ty,
ForDefinition_t isForDefinition);
+ /// TODO(cir): once we have cir.module, add this as a convenience method
+ /// there instead of here.
+ ///
+ /// Look up the specified global in the module symbol table.
+ /// 1. If it does not exist, add a declaration of the global and return it.
+  /// 2. Else, the global exists but has the wrong type: return the global
+ /// with a constantexpr cast to the right type.
+ /// 3. Finally, if the existing global is the correct declaration, return
+ /// the existing global.
+ cir::GlobalOp
+ getOrInsertGlobal(mlir::Location loc, llvm::StringRef name, mlir::Type ty,
+ llvm::function_ref<cir::GlobalOp()> createGlobalCallback);
+
+ // Overload to construct a global variable using its constructor's defaults.
+ cir::GlobalOp getOrInsertGlobal(mlir::Location loc, llvm::StringRef name,
+ mlir::Type ty);
+
static cir::GlobalOp createGlobalOp(CIRGenModule &cgm, mlir::Location loc,
llvm::StringRef name, mlir::Type t,
bool isConstant = false,
@@ -250,6 +267,18 @@ class CIRGenModule : public CIRGenTypeCache {
mlir::Attribute getAddrOfRTTIDescriptor(mlir::Location loc, QualType ty,
bool forEH = false);
+ static mlir::SymbolTable::Visibility getCIRVisibility(Visibility v) {
+ switch (v) {
+ case DefaultVisibility:
+ return mlir::SymbolTable::Visibility::Public;
+ case HiddenVisibility:
+ return mlir::SymbolTable::Visibility::Private;
+ case ProtectedVisibility:
+ llvm_unreachable("NYI");
+ }
+ llvm_unreachable("unknown visibility!");
+ }
+
/// Return a constant array for the given string.
mlir::Attribute getConstantArrayFromStringLiteral(const StringLiteral *e);
diff --git a/clang/lib/CIR/CodeGen/CIRGenVTables.cpp b/clang/lib/CIR/CodeGen/CIRGenVTables.cpp
index af8f5ae2cc0a5..bc001cdd8bfbe 100644
--- a/clang/lib/CIR/CodeGen/CIRGenVTables.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenVTables.cpp
@@ -47,6 +47,49 @@ cir::RecordType CIRGenVTables::getVTableType(const VTableLayout &layout) {
return cgm.getBuilder().getAnonRecordTy(tys, /*incomplete=*/false);
}
+/// At this point in the translation unit, does it appear that we can
+/// rely on the vtable being defined elsewhere in the program?
+///
+/// The response is really only definitive when called at the end of
+/// the translation unit.
+///
+/// The only semantic restriction here is that the object file should
+/// not contain a vtable definition when that vtable is defined
+/// strongly elsewhere. Otherwise, we'd just like to avoid emitting
+/// vtables when unnecessary.
+/// TODO(cir): this should be merged into common AST helper for codegen.
+bool CIRGenVTables::isVTableExternal(const CXXRecordDecl *RD) {
+ assert(RD->isDynamicClass() && "Non-dynamic classes have no VTable.");
+
+ // We always synthesize vtables if they are needed in the MS ABI. MSVC doesn't
+ // emit them even if there is an explicit template instantiation.
+ if (cgm.getTarget().getCXXABI().isMicrosoft())
+ return false;
+
+ // If we have an explicit instantiation declaration (and not a
+ // definition), the vtable is defined elsewhere.
+ TemplateSpecializationKind TSK = RD->getTemplateSpecializationKind();
+ if (TSK == TSK_ExplicitInstantiationDeclaration)
+ return true;
+
+ // Otherwise, if the class is an instantiated template, the
+ // vtable must be defined here.
+ if (TSK == TSK_ImplicitInstantiation ||
+ TSK == TSK_ExplicitInstantiationDefinition)
+ return false;
+
+ // Otherwise, if the class doesn't have a key function (possibly
+ // anymore), the vtable must be defined here.
+ const CXXMethodDecl *keyFunction =
+ cgm.getASTContext().getCurrentKeyFunction(RD);
+ if (!keyFunction)
+ return false;
+
+ // Otherwise, if we don't have a definition of the key function, the
+ // vtable must be defined somewhere else.
+ return !keyFunction->hasBody();
+}
+
/// This is a callback from Sema to tell us that a particular vtable is
/// required to be emitted in this translation unit.
///
diff --git a/clang/lib/CIR/CodeGen/CIRGenVTables.h b/clang/lib/CIR/CodeGen/CIRGenVTables.h
index e19242c651034..9c425ab43b3d9 100644
--- a/clang/lib/CIR/CodeGen/CIRGenVTables.h
+++ b/clang/lib/CIR/CodeGen/CIRGenVTables.h
@@ -100,6 +100,8 @@ class CIRGenVTables {
/// is enabled) and the VTT (if the class has virtual bases).
void generateClassData(const CXXRecordDecl *rd);
+ bool isVTableExternal(const clang::CXXRecordDecl *rd);
+
/// Returns the type of a vtable with the given layout. Normally a struct of
/// arrays of pointers, with one struct element for each vtable in the vtable
/// group.
diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
index 1865698838134..767bef9cd11ab 100644
--- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
+++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
@@ -222,8 +222,9 @@ class CIRAttrToValue {
return llvm::TypeSwitch<mlir::Attribute, mlir::Value>(attr)
.Case<cir::IntAttr, cir::FPAttr, cir::ConstComplexAttr,
cir::ConstArrayAttr, cir::ConstRecordAttr, cir::ConstVectorAttr,
- cir::ConstPtrAttr, cir::GlobalViewAttr, cir::VTableAttr,
- cir::ZeroAttr>([&](auto attrT) { return visitCirAttr(attrT); })
+ cir::ConstPtrAttr, cir::GlobalViewAttr, cir::TypeInfoAttr,
+ cir::VTableAttr, cir::ZeroAttr>(
+ [&](auto attrT) { return visitCirAttr(attrT); })
.Default([&](auto attrT) { return mlir::Value(); });
}
@@ -1694,7 +1695,7 @@ CIRToLLVMGlobalOpLowering::matchAndRewriteRegionInitializedGlobal(
// TODO: Generalize this handling when more types are needed here.
assert((isa<cir::ConstArrayAttr, cir::ConstRecordAttr, cir::ConstVectorAttr,
cir::ConstPtrAttr, cir::ConstComplexAttr, cir::GlobalViewAttr,
- cir::VTableAttr, cir::ZeroAttr>(init)));
+ cir::TypeInfoAttr, cir::VTableAttr, cir::ZeroAttr>(init)));
// TODO(cir): once LLVM's dialect has proper equivalent attributes this
// should be updated. For now, we use a custom op to initialize globals
@@ -1749,7 +1750,8 @@ mlir::LogicalResult CIRToLLVMGlobalOpLowering::matchAndRewrite(
} else if (mlir::isa<cir::ConstArrayAttr, cir::ConstVectorAttr,
cir::ConstRecordAttr, cir::ConstPtrAttr,
cir::ConstComplexAttr, cir::GlobalViewAttr,
- cir::VTableAttr, cir::ZeroAttr>(init.value())) {
+ cir::TypeInfoAttr, cir::VTableAttr, cir::ZeroAttr>(
+ init.value())) {
// TODO(cir): once LLVM's dialect has proper equivalent attributes this
// should be updated. For now, we use a custom op to initialize globals
// to the appropriate value.
diff --git a/clang/test/CIR/CodeGen/vtable-rtti.cpp b/clang/test/CIR/CodeGen/vtable-rtti.cpp
new file mode 100644
index 0000000000000..546ebbdec84e4
--- /dev/null
+++ b/clang/test/CIR/CodeGen/vtable-rtti.cpp
@@ -0,0 +1,503 @@
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir
+// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t-cir.ll
+// RUN: FileCheck --check-prefix=LLVM --input-file=%t-cir.ll %s
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -emit-llvm %s -o %t.ll
+// RUN: FileCheck --check-prefix=OGCG --input-file=%t.ll %s
+
+class A {
+public:
+ int a;
+ virtual void v();
+};
+
+class B : public virtual A {
+public:
+ int b;
+ virtual void w();
+};
+
+class C : public virtual A {
+public:
+ long c;
+ virtual void x();
+};
+
+class D : public B, public C {
+public:
+ long d;
+ D();
+ virtual void y();
+};
+
+// This is just here to force the record types to be emitted.
+void f(D *d) {}
+
+// Trigger vtable and VTT emission for D.
+void D::y() {}
+
+// CIR: !rec_A2Ebase = !cir.record<struct "A.base" packed {!cir.vptr, !s32i}>
+// CIR: !rec_B2Ebase = !cir.record<struct "B.base" packed {!cir.vptr, !s32i}>
+// CIR: !rec_C2Ebase = !cir.record<struct "C.base" {!cir.vptr, !s64i}>
+// CIR: !rec_A = !cir.record<class "A" packed padded {!cir.vptr, !s32i, !cir.array<!u8i x 4>}>
+// CIR: !rec_B = !cir.record<class "B" packed padded {!cir.vptr, !s32i, !cir.array<!u8i x 4>, !rec_A2Ebase, !cir.array<!u8i x 4>}>
+// CIR: !rec_C = !cir.record<class "C" {!cir.vptr, !s64i, !rec_A2Ebase}>
+// CIR: !rec_D = !cir.record<class "D" {!rec_B2Ebase, !rec_C2Ebase, !s64i, !rec_A2Ebase}>
+
+// CIR: !rec_anon_struct = !cir.record<struct {!cir.ptr<!u8i>, !cir.ptr<!u8i>, !u32i, !u32i, !cir.ptr<!u8i>, !s64i, !cir.ptr<!u8i>, !s64i}>
+// CIR: !rec_anon_struct1 = !cir.record<struct {!cir.array<!cir.ptr<!u8i> x 5>, !cir.array<!cir.ptr<!u8i> x 4>, !cir.array<!cir.ptr<!u8i> x 4>}>
+// CIR: !rec_anon_struct2 = !cir.record<struct {!cir.array<!cir.ptr<!u8i> x 4>, !cir.array<!cir.ptr<!u8i> x 4>}>
+
+// Vtable for D
+
+// CIR: cir.global{{.*}} @_ZTV1D = #cir.vtable<{
+// CIR-SAME: #cir.const_array<[#cir.ptr<40 : i64> : !cir.ptr<!u8i>,
+// CIR-SAME: #cir.ptr<null> : !cir.ptr<!u8i>,
+// CIR-SAME: #cir.global_view<@_ZTI1D> : !cir.ptr<!u8i>,
+// CIR-SAME: #cir.global_view<@_ZN1B1wEv> : !cir.ptr<!u8i>,
+// CIR-SAME: #cir.global_view<@_ZN1D1yEv> : !cir.ptr<!u8i>
+// CIR-SAME: ]> : !cir.array<!cir.ptr<!u8i> x 5>,
+// CIR-SAME: #cir.const_array<[#cir.ptr<24 : i64> : !cir.ptr<!u8i>,
+// CIR-SAME: #cir.ptr<-16 : i64> : !cir.ptr<!u8i>,
+// CIR-SAME: #cir.global_view<@_ZTI1D> : !cir.ptr<!u8i>,
+// CIR-SAME: #cir.global_view<@_ZN1C1xEv> : !cir.ptr<!u8i>
+// CIR-SAME: ]> : !cir.array<!cir.ptr<!u8i> x 4>,
+// CIR-SAME: #cir.const_array<[#cir.ptr<null> : !cir.ptr<!u8i>,
+// CIR-SAME: #cir.ptr<-40 : i64> : !cir.ptr<!u8i>,
+// CIR-SAME: #cir.global_view<@_ZTI1D> : !cir.ptr<!u8i>,
+// CIR-SAME: #cir.global_view<@_ZN1A1vEv> : !cir.ptr<!u8i>
+// CIR-SAME: ]> : !cir.array<!cir.ptr<!u8i> x 4>
+// CIR-SAME: }> : !rec_anon_struct1
+
+// LLVM: @_ZTV1D = global {
+// LLVM-SAME: [5 x ptr], [4 x ptr], [4 x ptr] }
+// LLVM-SAME: { [5 x ptr] [ptr inttoptr (i64 40 to ptr), ptr null, ptr @_ZTI1D, ptr @_ZN1B1wEv, ptr @_ZN1D1yEv],
+// LLVM-SAME: [4 x ptr] [ptr inttoptr (i64 24 to ptr), ptr inttoptr (i64 -16 to ptr), ptr @_ZTI1D, ptr @_ZN1C1xEv],
+// LLVM-SAME: [4 x ptr] [ptr null, ptr inttoptr (i64 -40 to ptr), ptr @_ZTI1D, ptr @_ZN1A1vEv]
+// LLVM-SAME: }, align 8
+
+// OGCG: @_ZTV1D = unnamed_addr constant {
+// OGCG-SAME: [5 x ptr], [4 x ptr], [4 x ptr] }
+// OGCG-SAME: { [5 x ptr] [ptr inttoptr (i64 40 to ptr), ptr null, ptr @_ZTI1D, ptr @_ZN1B1wEv, ptr @_ZN1D1yEv],
+// OGCG-SAME: [4 x ptr] [ptr inttoptr (i64 24 to ptr), ptr inttoptr (i64 -16 to ptr), ptr @_ZTI1D, ptr @_ZN1C1xEv],
+// OGCG-SAME: [4 x ptr] [ptr null, ptr inttoptr (i64 -40 to ptr), ptr @_ZTI1D, ptr @_ZN1A1vEv]
+// OGCG-SAME: }, align 8
+
+// VTT for D
+
+// CIR: cir.global{{.*}} @_ZTT1D = #cir.const_array<[
+// CIR-SAME: #cir.global_view<@_ZTV1D, [0 : i32, 3 : i32]> : !cir.ptr<!u8i>,
+// CIR-SAME: #cir.global_view<@_ZTC1D0_1B, [0 : i32, 3 : i32]> : !cir.ptr<!u8i>,
+// CIR-SAME: #cir.global_view<@_ZTC1D0_1B, [1 : i32, 3 : i32]> : !cir.ptr<!u8i>,
+// CIR-SAME: #cir.global_view<@_ZTC1D16_1C, [0 : i32, 3 : i32]> : !cir.ptr<!u8i>,
+// CIR-SAME: #cir.global_view<@_ZTC1D16_1C, [1 : i32, 3 : i32]> : !cir.ptr<!u8i>,
+// CIR-SAME: #cir.global_view<@_ZTV1D, [2 : i32, 3 : i32]> : !cir.ptr<!u8i>,
+// CIR-SAME: #cir.global_view<@_ZTV1D, [1 : i32, 3 : i32]> : !cir.ptr<!u8i>
+// CIR-SAME: ]> : !cir.array<!cir.ptr<!u8i> x 7>
+
+// LLVM: @_ZTT1D = global [7 x ptr] [
+// LLVM-SAME: ptr getelementptr inbounds nuw (i8, ptr @_ZTV1D, i64 24),
+// LLVM-SAME: ptr getelementptr inbounds nuw (i8, ptr @_ZTC1D0_1B, i64 24),
+// LLVM-SAME: ptr getelementptr inbounds nuw (i8, ptr @_ZTC1D0_1B, i64 56),
+// LLVM-SAME: ptr getelementptr inbounds nuw (i8, ptr @_ZTC1D16_1C, i64 24),
+// LLVM-SAME: ptr getelementptr inbounds nuw (i8, ptr @_ZTC1D16_1C, i64 56),
+// LLVM-SAME: ptr getelementptr inbounds nuw (i8, ptr @_ZTV1D, i64 96),
+// LLVM-SAME: ptr getelementptr inbounds nuw (i8, ptr @_ZTV1D, i64 64)
+// LLVM-SAME: ], align 8
+
+// OGCG: @_ZTT1D = unnamed_addr constant [7 x ptr] [
+// OGCG-SAME: ptr getelementptr inbounds inrange(-24, 16) ({ [5 x ptr], [4 x ptr], [4 x ptr] }, ptr @_ZTV1D, i32 0, i32 0, i32 3),
+// OGCG-SAME: ptr getelementptr inbounds inrange(-24, 8) ({ [4 x ptr], [4 x ptr] }, ptr @_ZTC1D0_1B, i32 0, i32 0, i32 3),
+// OGCG-SAME: ptr getelementptr inbounds inrange(-24, 8) ({ [4 x ptr], [4 x ptr] }, ptr @_ZTC1D0_1B, i32 0, i32 1, i32 3),
+// OGCG-SAME: ptr getelementptr inbounds inrange(-24, 8) ({ [4 x ptr], [4 x ptr] }, ptr @_ZTC1D16_1C, i32 0, i32 0, i32 3),
+// OGCG-SAME: ptr getelementptr inbounds inrange(-24, 8) ({ [4 x ptr], [4 x ptr] }, ptr @_ZTC1D16_1C, i32 0, i32 1, i32 3),
+// OGCG-SAME: ptr getelementptr inbounds inrange(-24, 8) ({ [5 x ptr], [4 x ptr], [4 x ptr] }, ptr @_ZTV1D, i32 0, i32 2, i32 3),
+// OGCG-SAME: ptr getelementptr inbounds inrange(-24, 8) ({ [5 x ptr], [4 x ptr], [4 x ptr] }, ptr @_ZTV1D, i32 0, i32 1, i32 3)
+// OGCG-SAME: ], align 8
+
+// Construction vtable for B-in-D
+
+// CIR: cir.global{{.*}} @_ZTC1D0_1B = #cir.vtable<{
+// CIR-SAME: #cir.const_array<[
+// CIR-SAME: #cir.ptr<40 : i64> : !cir.ptr<!u8i>,
+// CIR-SAME: #cir.ptr<null> : !cir.ptr<!u8i>,
+// CIR-SAME: #cir.global_view<@_ZTI1B> : !cir.ptr<!u8i>,
+// CIR-SAME: #cir.global_view<@_ZN1B1wEv> : !cir.ptr<!u8i>
+// CIR-SAME: ]> : !cir.array<!cir.ptr<!u8i> x 4>,
+// CIR-SAME: #cir.const_array<[
+// CIR-SAME: #cir.ptr<null> : !cir.ptr<!u8i>,
+// CIR-SAME: #cir.ptr<-40 : i64> : !cir.ptr<!u8i>,
+// CIR-SAME: #cir.global_view<@_ZTI1B> : !cir.ptr<!u8i>,
+// CIR-SAME: #cir.global_view<@_ZN1A1vEv> : !cir.ptr<!u8i>
+// CIR-SAME: ]> : !cir.array<!cir.ptr<!u8i> x 4>}> : !rec_anon_struct2
+
+// LLVM: @_ZTC1D0_1B = global { [4 x ptr], [4 x ptr] } {
+// LLVM-SAME: [4 x ptr] [ptr inttoptr (i64 40 to ptr), ptr null, ptr @_ZTI1B, ptr @_ZN1B1wEv],
+// LLVM-SAME: [4 x ptr] [ptr null, ptr inttoptr (i64 -40 to ptr), ptr @_ZTI1B, ptr @_ZN1A1vEv]
+// LLVM-SAME: }, align 8
+
+// OGCG: @_ZTC1D0_1B = unnamed_addr constant { [4 x ptr], [4 x ptr] } {
+// OGCG-SAME: [4 x ptr] [ptr inttoptr (i64 40 to ptr), ptr null, ptr @_ZTI1B, ptr @_ZN1B1wEv],
+// OGCG-SAME: [4 x ptr] [ptr null, ptr inttoptr (i64 -40 to ptr), ptr @_ZTI1B, ptr @_ZN1A1vEv]
+// OGCG-SAME: }, align 8
+
+// CIR: cir.global{{.*}} @_ZTI1B : !cir.ptr<!u8i>
+
+// LLVM: @_ZTI1B = external global ptr
+
+// OGCG: @_ZTI1B = external constant ptr
+
+// Construction vtable for C-in-D
+
+// CIR: cir.global{{.*}} @_ZTC1D16_1C = #cir.vtable<{
+// CIR-SAME: #cir.const_array<[
+// CIR-SAME: #cir.ptr<24 : i64> : !cir.ptr<!u8i>,
+// CIR-SAME: #cir.ptr<null> : !cir.ptr<!u8i>,
+// CIR-SAME: #cir.global_view<@_ZTI1C> : !cir.ptr<!u8i>,
+// CIR-SAME: #cir.global_view<@_ZN1C1xEv> : !cir.ptr<!u8i>
+// CIR-SAME: ]> : !cir.array<!cir.ptr<!u8i> x 4>,
+// CIR-SAME: #cir.const_array<[
+// CIR-SAME: #cir.ptr<null> : !cir.ptr<!u8i>,
+// CIR-SAME: #cir.ptr<-24 : i64> : !cir.ptr<!u8i>,
+// CIR-SAME: #cir.global_view<@_ZTI1C> : !cir.ptr<!u8i>,
+// CIR-SAME: #cir.global_view<@_ZN1A1vEv> : !cir.ptr<!u8i>
+// CIR-SAME: ]> : !cir.array<!cir.ptr<!u8i> x 4>}> : !rec_anon_struct2
+
+// LLVM: @_ZTC1D16_1C = global { [4 x ptr], [4 x ptr] } {
+// LLVM-SAME: [4 x ptr] [ptr inttoptr (i64 24 to ptr), ptr null, ptr @_ZTI1C, ptr @_ZN1C1xEv],
+// LLVM-SAME: [4 x ptr] [ptr null, ptr inttoptr (i64 -24 to ptr), ptr @_ZTI1C, ptr @_ZN1A1vEv]
+// LLVM-SAME: }, align 8
+
+// OGCG: @_ZTC1D16_1C = unnamed_addr constant { [4 x ptr], [4 x ptr] } {
+// OGCG-SAME: [4 x ptr] [ptr inttoptr (i64 24 to ptr), ptr null, ptr @_ZTI1C, ptr @_ZN1C1xEv],
+// OGCG-SAME: [4 x ptr] [ptr null, ptr inttoptr (i64 -24 to ptr), ptr @_ZTI1C, ptr @_ZN1A1vEv]
+// OGCG-SAME: }, align 8
+
+// CIR: cir.global{{.*}} @_ZTI1C : !cir.ptr<!u8i>
+
+// LLVM: @_ZTI1C = external global ptr
+
+// OGCG: @_ZTI1C = external constant ptr
+
+// RTTI class type info for D
+
+// CIR: cir.global{{.*}} @_ZTVN10__cxxabiv121__vmi_class_type_infoE : !cir.ptr<!cir.ptr<!u8i>>
+
+// CIR: cir.global{{.*}} @_ZTS1D = #cir.const_array<"1D" : !cir.array<!s8i x 2>> : !cir.array<!s8i x 2>
+
+// CIR: cir.global{{.*}} @_ZTI1D = #cir.typeinfo<{
+// CIR-SAME: #cir.global_view<@_ZTVN10__cxxabiv121__vmi_class_type_infoE, [2 : i32]> : !cir.ptr<!u8i>,
+// CIR-SAME: #cir.global_view<@_ZTS1D> : !cir.ptr<!u8i>,
+// CIR-SAME: #cir.int<2> : !u32i, #cir.int<2> : !u32i,
+// CIR-SAME: #cir.global_view<@_ZTI1B> : !cir.ptr<!u8i>,
+// CIR-SAME: #cir.int<2> : !s64i,
+// CIR-SAME: #cir.global_view<@_ZTI1C> : !cir.ptr<!u8i>,
+// CIR-SAME: #cir.int<4098> : !s64i}> : !rec_anon_struct
+
+// CIR: cir.global{{.*}} @_ZTV1A : !rec_anon_struct3
+
+// LLVM: @_ZTVN10__cxxabiv121__vmi_class_type_infoE = external global ptr
+// LLVM: @_ZTS1D = global [2 x i8] c"1D", align 1
+
+// LLVM: @_ZTI1D = constant { ptr, ptr, i32, i32, ptr, i64, ptr, i64 } {
+// LLVM-SAME: ptr getelementptr (i8, ptr @_ZTVN10__cxxabiv121__vmi_class_type_infoE, i64 16),
+// LLVM-SAME: ptr @_ZTS1D, i32 2, i32 2, ptr @_ZTI1B, i64 2, ptr @_ZTI1C, i64 4098 }
+
+// OGCG: @_ZTI1D = constant { ptr, ptr, i32, i32, ptr, i64, ptr, i64 } {
+// OGCG-SAME: ptr getelementptr inbounds (ptr, ptr @_ZTVN10__cxxabiv121__vmi_class_type_infoE, i64 2),
+// OGCG-SAME: ptr @_ZTS1D, i32 2, i32 2, ptr @_ZTI1B, i64 2, ptr @_ZTI1C, i64 4098 }, align 8
+
+// OGCG: @_ZTVN10__cxxabiv121__vmi_class_type_infoE = external global [0 x ptr]
+// OGCG: @_ZTS1D = constant [3 x i8] c"1D\00", align 1
+// OGCG: @_ZTV1A = external unnamed_addr constant { [3 x ptr] }, align 8
+
+D::D() {}
+
+// In CIR, this gets emitted after the B and C constructors. See below.
+// Base (C2) constructor for D
+
+// OGCG: define {{.*}} void @_ZN1DC2Ev(ptr {{.*}} %[[THIS_ARG:.*]], ptr {{.*}} %[[VTT_ARG:.*]])
+// OGCG: %[[THIS_ADDR:.*]] = alloca ptr
+// OGCG: %[[VTT_ADDR:.*]] = alloca ptr
+// OGCG: store ptr %[[THIS_ARG]], ptr %[[THIS_ADDR]]
+// OGCG: store ptr %[[VTT_ARG]], ptr %[[VTT_ADDR]]
+// OGCG: %[[THIS:.*]] = load ptr, ptr %[[THIS_ADDR]]
+// OGCG: %[[VTT:.*]] = load ptr, ptr %[[VTT_ADDR]]
+// OGCG: %[[B_VTT:.*]] = getelementptr inbounds ptr, ptr %[[VTT]], i64 1
+// OGCG: call void @_ZN1BC2Ev(ptr {{.*}} %[[THIS]], ptr {{.*}} %[[B_VTT]])
+// OGCG: %[[C_ADDR:.*]] = getelementptr inbounds i8, ptr %[[THIS]], i64 16
+// OGCG: %[[C_VTT:.*]] = getelementptr inbounds ptr, ptr %[[VTT]], i64 3
+// OGCG: call void @_ZN1CC2Ev(ptr {{.*}} %[[C_ADDR]], ptr {{.*}} %[[C_VTT]])
+// OGCG: %[[VPTR:.*]] = load ptr, ptr %[[VTT]]
+// OGCG: store ptr %[[VPTR]], ptr %[[THIS]]
+// OGCG: %[[D_VPTR_ADDR:.*]] = getelementptr inbounds ptr, ptr %[[VTT]], i64 5
+// OGCG: %[[D_VPTR:.*]] = load ptr, ptr %[[D_VPTR_ADDR]]
+// OGCG: %[[D_VPTR_ADDR2:.*]] = load ptr, ptr %[[THIS]]
+// OGCG: %[[BASE_OFFSET_ADDR:.*]] = getelementptr i8, ptr %[[D_VPTR_ADDR2]], i64 -24
+// OGCG: %[[BASE_OFFSET:.*]] = load i64, ptr %[[BASE_OFFSET_ADDR]]
+// OGCG: %[[BASE_PTR:.*]] = getelementptr inbounds i8, ptr %[[THIS]], i64 %[[BASE_OFFSET]]
+// OGCG: store ptr %[[D_VPTR]], ptr %[[BASE_PTR]]
+// OGCG: %[[C_VPTR_ADDR:.*]] = getelementptr inbounds ptr, ptr %[[VTT]], i64 6
+// OGCG: %[[C_VPTR:.*]] = load ptr, ptr %[[C_VPTR_ADDR]]
+// OGCG: %[[C_ADDR:.*]] = getelementptr inbounds i8, ptr %[[THIS]], i64 16
+// OGCG: store ptr %[[C_VPTR]], ptr %[[C_ADDR]]
+
+// Base (C2) constructor for B
+
+// CIR: cir.func {{.*}} @_ZN1BC2Ev
+// CIR-SAME: %[[THIS_ARG:.*]]: !cir.ptr<!rec_B>
+// CIR-SAME: %[[VTT_ARG:.*]]: !cir.ptr<!cir.ptr<!void>>
+// CIR: %[[THIS_ADDR:.*]] = cir.alloca {{.*}} ["this", init]
+// CIR: %[[VTT_ADDR:.*]] = cir.alloca {{.*}} ["vtt", init]
+// CIR: cir.store %[[THIS_ARG]], %[[THIS_ADDR]]
+// CIR: cir.store %[[VTT_ARG]], %[[VTT_ADDR]]
+// CIR: %[[THIS:.*]] = cir.load %[[THIS_ADDR]]
+// CIR: %[[VTT:.*]] = cir.load{{.*}} %[[VTT_ADDR]]
+// CIR: %[[VTT_ADDR_POINT:.*]] = cir.vtt.address_point %[[VTT]] : !cir.ptr<!cir.ptr<!void>>, offset = 0 -> !cir.ptr<!cir.ptr<!void>>
+// CIR: %[[VPTR_ADDR:.*]] = cir.cast(bitcast, %[[VTT_ADDR_POINT]] : !cir.ptr<!cir.ptr<!void>>), !cir.ptr<!cir.vptr>
+// CIR: %[[VPTR:.*]] = cir.load{{.*}} %[[VPTR_ADDR]]
+// CIR: %[[B_VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[THIS]]
+// CIR: cir.store{{.*}} %[[VPTR]], %[[B_VPTR_ADDR]]
+// CIR: %[[B_VTT_ADDR_POINT:.*]] = cir.vtt.address_point %[[VTT]] : !cir.ptr<!cir.ptr<!void>>, offset = 1 -> !cir.ptr<!cir.ptr<!void>>
+// CIR: %[[B_VPTR_ADDR:.*]] = cir.cast(bitcast, %[[B_VTT_ADDR_POINT]] : !cir.ptr<!cir.ptr<!void>>), !cir.ptr<!cir.vptr>
+// CIR: %[[B_VPTR:.*]] = cir.load{{.*}} %[[B_VPTR_ADDR]]
+// CIR: %[[B_VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[THIS]]
+// CIR: %[[VPTR:.*]] = cir.load{{.*}} %[[B_VPTR_ADDR]]
+// CIR: %[[VPTR_ADDR2:.*]] = cir.cast(bitcast, %[[VPTR]] : !cir.vptr), !cir.ptr<!u8i>
+// CIR: %[[CONST_24:.*]] = cir.const #cir.int<-24>
+// CIR: %[[BASE_OFFSET_ADDR:.*]] = cir.ptr_stride(%[[VPTR_ADDR2]] : !cir.ptr<!u8i>, %[[CONST_24]] : !s64i), !cir.ptr<!u8i>
+// CIR: %[[BASE_OFFSET_PTR:.*]] = cir.cast(bitcast, %[[BASE_OFFSET_ADDR]] : !cir.ptr<!u8i>), !cir.ptr<!s64i>
+// CIR: %[[BASE_OFFSET:.*]] = cir.load{{.*}} %[[BASE_OFFSET_PTR]] : !cir.ptr<!s64i>, !s64i
+// CIR: %[[THIS_PTR:.*]] = cir.cast(bitcast, %[[THIS]] : !cir.ptr<!rec_B>), !cir.ptr<!u8i>
+// CIR: %[[BASE_PTR:.*]] = cir.ptr_stride(%[[THIS_PTR]] : !cir.ptr<!u8i>, %[[BASE_OFFSET]] : !s64i), !cir.ptr<!u8i>
+// CIR: %[[BASE_CAST:.*]] = cir.cast(bitcast, %[[BASE_PTR]] : !cir.ptr<!u8i>), !cir.ptr<!rec_B>
+// CIR: %[[BASE_VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[BASE_CAST]]
+// CIR: cir.store{{.*}} %[[B_VPTR]], %[[BASE_VPTR_ADDR]]
+
+// LLVM: define {{.*}} void @_ZN1BC2Ev(ptr %[[THIS_ARG:.*]], ptr %[[VTT_ARG:.*]])
+// LLVM: %[[THIS_ADDR:.*]] = alloca ptr
+// LLVM: %[[VTT_ADDR:.*]] = alloca ptr
+// LLVM: store ptr %[[THIS_ARG]], ptr %[[THIS_ADDR]]
+// LLVM: store ptr %[[VTT_ARG]], ptr %[[VTT_ADDR]]
+// LLVM: %[[THIS:.*]] = load ptr, ptr %[[THIS_ADDR]]
+// LLVM: %[[VTT:.*]] = load ptr, ptr %[[VTT_ADDR]]
+// LLVM: %[[VPTR:.*]] = load ptr, ptr %[[VTT]]
+// LLVM: store ptr %[[VPTR]], ptr %[[THIS]]
+// LLVM: %[[B_VPTR_ADDR:.*]] = getelementptr inbounds ptr, ptr %[[VTT]], i32 1
+// LLVM: %[[B_VPTR:.*]] = load ptr, ptr %[[B_VPTR_ADDR]]
+// LLVM: %[[VPTR:.*]] = load ptr, ptr %[[THIS]]
+// LLVM: %[[BASE_OFFSET_ADDR:.*]] = getelementptr i8, ptr %[[VPTR]], i64 -24
+// LLVM: %[[BASE_OFFSET:.*]] = load i64, ptr %[[BASE_OFFSET_ADDR]]
+// LLVM: %[[BASE_PTR:.*]] = getelementptr i8, ptr %[[THIS]], i64 %[[BASE_OFFSET]]
+// LLVM: store ptr %[[B_VPTR]], ptr %[[BASE_PTR]]
+
+// OGCG: define {{.*}} void @_ZN1BC2Ev(ptr {{.*}} %[[THIS_ARG:.*]], ptr {{.*}} %[[VTT_ARG:.*]])
+// OGCG: %[[THIS_ADDR:.*]] = alloca ptr
+// OGCG: %[[VTT_ADDR:.*]] = alloca ptr
+// OGCG: store ptr %[[THIS_ARG]], ptr %[[THIS_ADDR]]
+// OGCG: store ptr %[[VTT_ARG]], ptr %[[VTT_ADDR]]
+// OGCG: %[[THIS:.*]] = load ptr, ptr %[[THIS_ADDR]]
+// OGCG: %[[VTT:.*]] = load ptr, ptr %[[VTT_ADDR]]
+// OGCG: %[[VPTR:.*]] = load ptr, ptr %[[VTT]]
+// OGCG: store ptr %[[VPTR]], ptr %[[THIS]]
+// OGCG: %[[B_VPTR_ADDR:.*]] = getelementptr inbounds ptr, ptr %[[VTT]], i64 1
+// OGCG: %[[B_VPTR:.*]] = load ptr, ptr %[[B_VPTR_ADDR]]
+// OGCG: %[[VPTR:.*]] = load ptr, ptr %[[THIS]]
+// OGCG: %[[BASE_OFFSET_ADDR:.*]] = getelementptr i8, ptr %[[VPTR]], i64 -24
+// OGCG: %[[BASE_OFFSET:.*]] = load i64, ptr %[[BASE_OFFSET_ADDR]]
+// OGCG: %[[BASE_PTR:.*]] = getelementptr inbounds i8, ptr %[[THIS]], i64 %[[BASE_OFFSET]]
+// OGCG: store ptr %[[B_VPTR]], ptr %[[BASE_PTR]]
+
+// Base (C2) constructor for C
+
+// CIR: cir.func {{.*}} @_ZN1CC2Ev
+// CIR-SAME: %[[THIS_ARG:.*]]: !cir.ptr<!rec_C>
+// CIR-SAME: %[[VTT_ARG:.*]]: !cir.ptr<!cir.ptr<!void>>
+// CIR: %[[THIS_ADDR:.*]] = cir.alloca {{.*}} ["this", init]
+// CIR: %[[VTT_ADDR:.*]] = cir.alloca {{.*}} ["vtt", init]
+// CIR: cir.store %[[THIS_ARG]], %[[THIS_ADDR]]
+// CIR: cir.store %[[VTT_ARG]], %[[VTT_ADDR]]
+// CIR: %[[THIS:.*]] = cir.load %[[THIS_ADDR]]
+// CIR: %[[VTT:.*]] = cir.load{{.*}} %[[VTT_ADDR]]
+// CIR: %[[VTT_ADDR_POINT:.*]] = cir.vtt.address_point %[[VTT]] : !cir.ptr<!cir.ptr<!void>>, offset = 0 -> !cir.ptr<!cir.ptr<!void>>
+// CIR: %[[VPTR_ADDR:.*]] = cir.cast(bitcast, %[[VTT_ADDR_POINT]] : !cir.ptr<!cir.ptr<!void>>), !cir.ptr<!cir.vptr>
+// CIR: %[[VPTR:.*]] = cir.load{{.*}} %[[VPTR_ADDR]]
+// CIR: %[[C_VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[THIS]]
+// CIR: cir.store{{.*}} %[[VPTR]], %[[C_VPTR_ADDR]]
+// CIR: %[[C_VTT_ADDR_POINT:.*]] = cir.vtt.address_point %[[VTT]] : !cir.ptr<!cir.ptr<!void>>, offset = 1 -> !cir.ptr<!cir.ptr<!void>>
+// CIR: %[[C_VPTR_ADDR:.*]] = cir.cast(bitcast, %[[C_VTT_ADDR_POINT]] : !cir.ptr<!cir.ptr<!void>>), !cir.ptr<!cir.vptr>
+// CIR: %[[C_VPTR:.*]] = cir.load{{.*}} %[[C_VPTR_ADDR]]
+// CIR: %[[C_VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[THIS]]
+// CIR: %[[VPTR:.*]] = cir.load{{.*}} %[[C_VPTR_ADDR]]
+// CIR: %[[VPTR_ADDR2:.*]] = cir.cast(bitcast, %[[VPTR]] : !cir.vptr), !cir.ptr<!u8i>
+// CIR: %[[CONST_24:.*]] = cir.const #cir.int<-24>
+// CIR: %[[BASE_OFFSET_ADDR:.*]] = cir.ptr_stride(%[[VPTR_ADDR2]] : !cir.ptr<!u8i>, %[[CONST_24]] : !s64i), !cir.ptr<!u8i>
+// CIR: %[[BASE_OFFSET_PTR:.*]] = cir.cast(bitcast, %[[BASE_OFFSET_ADDR]] : !cir.ptr<!u8i>), !cir.ptr<!s64i>
+// CIR: %[[BASE_OFFSET:.*]] = cir.load{{.*}} %[[BASE_OFFSET_PTR]] : !cir.ptr<!s64i>, !s64i
+// CIR: %[[THIS_PTR:.*]] = cir.cast(bitcast, %[[THIS]] : !cir.ptr<!rec_C>), !cir.ptr<!u8i>
+// CIR: %[[BASE_PTR:.*]] = cir.ptr_stride(%[[THIS_PTR]] : !cir.ptr<!u8i>, %[[BASE_OFFSET]] : !s64i), !cir.ptr<!u8i>
+// CIR: %[[BASE_CAST:.*]] = cir.cast(bitcast, %[[BASE_PTR]] : !cir.ptr<!u8i>), !cir.ptr<!rec_C>
+// CIR: %[[BASE_VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[BASE_CAST]]
+// CIR: cir.store{{.*}} %[[C_VPTR]], %[[BASE_VPTR_ADDR]]
+
+// LLVM: define {{.*}} void @_ZN1CC2Ev(ptr %[[THIS_ARG:.*]], ptr %[[VTT_ARG:.*]])
+// LLVM: %[[THIS_ADDR:.*]] = alloca ptr
+// LLVM: %[[VTT_ADDR:.*]] = alloca ptr
+// LLVM: store ptr %[[THIS_ARG]], ptr %[[THIS_ADDR]]
+// LLVM: store ptr %[[VTT_ARG]], ptr %[[VTT_ADDR]]
+// LLVM: %[[THIS:.*]] = load ptr, ptr %[[THIS_ADDR]]
+// LLVM: %[[VTT:.*]] = load ptr, ptr %[[VTT_ADDR]]
+// LLVM: %[[VPTR:.*]] = load ptr, ptr %[[VTT]]
+// LLVM: store ptr %[[VPTR]], ptr %[[THIS]]
+// LLVM: %[[B_VPTR_ADDR:.*]] = getelementptr inbounds ptr, ptr %[[VTT]], i32 1
+// LLVM: %[[B_VPTR:.*]] = load ptr, ptr %[[B_VPTR_ADDR]]
+// LLVM: %[[VPTR:.*]] = load ptr, ptr %[[THIS]]
+// LLVM: %[[BASE_OFFSET_ADDR:.*]] = getelementptr i8, ptr %[[VPTR]], i64 -24
+// LLVM: %[[BASE_OFFSET:.*]] = load i64, ptr %[[BASE_OFFSET_ADDR]]
+// LLVM: %[[BASE_PTR:.*]] = getelementptr i8, ptr %[[THIS]], i64 %[[BASE_OFFSET]]
+// LLVM: store ptr %[[B_VPTR]], ptr %[[BASE_PTR]]
+
+// OGCG: define {{.*}} void @_ZN1CC2Ev(ptr {{.*}} %[[THIS_ARG:.*]], ptr {{.*}} %[[VTT_ARG:.*]])
+// OGCG: %[[THIS_ADDR:.*]] = alloca ptr
+// OGCG: %[[VTT_ADDR:.*]] = alloca ptr
+// OGCG: store ptr %[[THIS_ARG]], ptr %[[THIS_ADDR]]
+// OGCG: store ptr %[[VTT_ARG]], ptr %[[VTT_ADDR]]
+// OGCG: %[[THIS:.*]] = load ptr, ptr %[[THIS_ADDR]]
+// OGCG: %[[VTT:.*]] = load ptr, ptr %[[VTT_ADDR]]
+// OGCG: %[[VPTR:.*]] = load ptr, ptr %[[VTT]]
+// OGCG: store ptr %[[VPTR]], ptr %[[THIS]]
+// OGCG: %[[B_VPTR_ADDR:.*]] = getelementptr inbounds ptr, ptr %[[VTT]], i64 1
+// OGCG: %[[B_VPTR:.*]] = load ptr, ptr %[[B_VPTR_ADDR]]
+// OGCG: %[[VPTR:.*]] = load ptr, ptr %[[THIS]]
+// OGCG: %[[BASE_OFFSET_ADDR:.*]] = getelementptr i8, ptr %[[VPTR]], i64 -24
+// OGCG: %[[BASE_OFFSET:.*]] = load i64, ptr %[[BASE_OFFSET_ADDR]]
+// OGCG: %[[BASE_PTR:.*]] = getelementptr inbounds i8, ptr %[[THIS]], i64 %[[BASE_OFFSET]]
+// OGCG: store ptr %[[B_VPTR]], ptr %[[BASE_PTR]]
+
+// Base (C2) constructor for D
+
+// CIR: cir.func {{.*}} @_ZN1DC2Ev
+// CIR-SAME: %[[THIS_ARG:.*]]: !cir.ptr<!rec_D>
+// CIR-SAME: %[[VTT_ARG:.*]]: !cir.ptr<!cir.ptr<!void>>
+// CIR: %[[THIS_ADDR:.*]] = cir.alloca {{.*}} ["this", init]
+// CIR: %[[VTT_ADDR:.*]] = cir.alloca {{.*}} ["vtt", init]
+// CIR: cir.store %[[THIS_ARG]], %[[THIS_ADDR]]
+// CIR: cir.store %[[VTT_ARG]], %[[VTT_ADDR]]
+// CIR: %[[THIS:.*]] = cir.load %[[THIS_ADDR]]
+// CIR: %[[VTT:.*]] = cir.load{{.*}} %[[VTT_ADDR]]
+// CIR: %[[B_ADDR:.*]] = cir.base_class_addr %[[THIS]] : !cir.ptr<!rec_D> nonnull [0] -> !cir.ptr<!rec_B>
+// CIR: %[[B_VTT:.*]] = cir.vtt.address_point %[[VTT]] : !cir.ptr<!cir.ptr<!void>>, offset = 1 -> !cir.ptr<!cir.ptr<!void>>
+// CIR: cir.call @_ZN1BC2Ev(%[[B_ADDR]], %[[B_VTT]]) nothrow : (!cir.ptr<!rec_B>, !cir.ptr<!cir.ptr<!void>>) -> ()
+// CIR: %[[C_ADDR:.*]] = cir.base_class_addr %[[THIS]] : !cir.ptr<!rec_D> nonnull [16] -> !cir.ptr<!rec_C>
+// CIR: %[[C_VTT:.*]] = cir.vtt.address_point %[[VTT]] : !cir.ptr<!cir.ptr<!void>>, offset = 3 -> !cir.ptr<!cir.ptr<!void>>
+// CIR: cir.call @_ZN1CC2Ev(%[[C_ADDR]], %[[C_VTT]]) nothrow : (!cir.ptr<!rec_C>, !cir.ptr<!cir.ptr<!void>>) -> ()
+// CIR: %[[D_VTT:.*]] = cir.vtt.address_point %[[VTT]] : !cir.ptr<!cir.ptr<!void>>, offset = 0 -> !cir.ptr<!cir.ptr<!void>>
+// CIR: %[[VPTR_ADDR:.*]] = cir.cast(bitcast, %[[D_VTT]] : !cir.ptr<!cir.ptr<!void>>), !cir.ptr<!cir.vptr>
+// CIR: %[[VPTR:.*]] = cir.load{{.*}} %[[VPTR_ADDR]] : !cir.ptr<!cir.vptr>, !cir.vptr
+// CIR: %[[D_VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[THIS]]
+// CIR: cir.store{{.*}} %[[VPTR]], %[[D_VPTR_ADDR]]
+// CIR: %[[D_VTT_ADDR_POINT:.*]] = cir.vtt.address_point %[[VTT]] : !cir.ptr<!cir.ptr<!void>>, offset = 5 -> !cir.ptr<!cir.ptr<!void>>
+// CIR: %[[D_VPTR_ADDR:.*]] = cir.cast(bitcast, %[[D_VTT_ADDR_POINT]] : !cir.ptr<!cir.ptr<!void>>), !cir.ptr<!cir.vptr>
+// CIR: %[[D_VPTR:.*]] = cir.load{{.*}} %[[D_VPTR_ADDR]] : !cir.ptr<!cir.vptr>, !cir.vptr
+// CIR: %[[D_VPTR_ADDR2:.*]] = cir.vtable.get_vptr %[[THIS]] : !cir.ptr<!rec_D> -> !cir.ptr<!cir.vptr>
+// CIR: %[[VPTR2:.*]] = cir.load{{.*}} %[[D_VPTR_ADDR2]] : !cir.ptr<!cir.vptr>, !cir.vptr
+// CIR: %[[VPTR_ADDR2:.*]] = cir.cast(bitcast, %[[VPTR2]] : !cir.vptr), !cir.ptr<!u8i>
+// CIR: %[[CONST_24:.*]] = cir.const #cir.int<-24> : !s64i
+// CIR: %[[BASE_OFFSET_ADDR:.*]] = cir.ptr_stride(%[[VPTR_ADDR2]] : !cir.ptr<!u8i>, %[[CONST_24]] : !s64i), !cir.ptr<!u8i>
+// CIR: %[[BASE_OFFSET_PTR:.*]] = cir.cast(bitcast, %[[BASE_OFFSET_ADDR]] : !cir.ptr<!u8i>), !cir.ptr<!s64i>
+// CIR: %[[BASE_OFFSET:.*]] = cir.load{{.*}} %[[BASE_OFFSET_PTR]] : !cir.ptr<!s64i>, !s64i
+// CIR: %[[THIS_PTR:.*]] = cir.cast(bitcast, %[[THIS]] : !cir.ptr<!rec_D>), !cir.ptr<!u8i>
+// CIR: %[[BASE_PTR:.*]] = cir.ptr_stride(%[[THIS_PTR]] : !cir.ptr<!u8i>, %[[BASE_OFFSET]] : !s64i), !cir.ptr<!u8i>
+// CIR: %[[BASE_CAST:.*]] = cir.cast(bitcast, %[[BASE_PTR]] : !cir.ptr<!u8i>), !cir.ptr<!rec_D>
+// CIR: %[[BASE_VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[BASE_CAST]]
+// CIR: cir.store{{.*}} %[[D_VPTR]], %[[BASE_VPTR_ADDR]]
+// CIR: %[[C_VTT_ADDR_POINT:.*]] = cir.vtt.address_point %[[VTT]] : !cir.ptr<!cir.ptr<!void>>, offset = 6 -> !cir.ptr<!cir.ptr<!void>>
+// CIR: %[[C_VPTR_ADDR:.*]] = cir.cast(bitcast, %[[C_VTT_ADDR_POINT]] : !cir.ptr<!cir.ptr<!void>>), !cir.ptr<!cir.vptr>
+// CIR: %[[C_VPTR:.*]] = cir.load{{.*}} %[[C_VPTR_ADDR]] : !cir.ptr<!cir.vptr>, !cir.vptr
+// CIR: %[[C_ADDR:.*]] = cir.base_class_addr %[[THIS]] : !cir.ptr<!rec_D> nonnull [16] -> !cir.ptr<!rec_C>
+// CIR: %[[C_VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[C_ADDR]] : !cir.ptr<!rec_C> -> !cir.ptr<!cir.vptr>
+// CIR: cir.store{{.*}} %[[C_VPTR]], %[[C_VPTR_ADDR]] : !cir.vptr, !cir.ptr<!cir.vptr>
+
+// The C2 constructor for D gets emitted earlier in OGCG, see above.
+
+// Base (C2) constructor for A
+
+// CIR: cir.func {{.*}} @_ZN1AC2Ev
+// CIR-SAME: %[[THIS_ARG:.*]]: !cir.ptr<!rec_A>
+// CIR: %[[THIS_ADDR:.*]] = cir.alloca {{.*}} ["this", init]
+// CIR: cir.store %[[THIS_ARG]], %[[THIS_ADDR]]
+// CIR: %[[THIS:.*]] = cir.load %[[THIS_ADDR]]
+// CIR: %[[VPTR:.*]] = cir.vtable.address_point(@_ZTV1A, address_point = <index = 0, offset = 2>) : !cir.vptr
+// CIR: %[[VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[THIS]] : !cir.ptr<!rec_A> -> !cir.ptr<!cir.vptr>
+// CIR: cir.store{{.*}} %[[VPTR]], %[[VPTR_ADDR]] : !cir.vptr, !cir.ptr<!cir.vptr>
+
+// LLVM: define {{.*}} void @_ZN1AC2Ev(ptr %[[THIS_ARG:.*]]) {
+// LLVM: %[[THIS_ADDR:.*]] = alloca ptr, i64 1, align 8
+// LLVM: store ptr %[[THIS_ARG]], ptr %[[THIS_ADDR]], align 8
+// LLVM: %[[THIS:.*]] = load ptr, ptr %[[THIS_ADDR]], align 8
+// LLVM: store ptr getelementptr inbounds nuw (i8, ptr @_ZTV1A, i64 16), ptr %[[THIS]]
+
+// The C2 constructor for A gets emitted later in OGCG, see below.
+
+// Complete (C1) constructor for D
+
+// CIR: cir.func {{.*}} @_ZN1DC1Ev
+// CIR-SAME: %[[THIS_ARG:.*]]: !cir.ptr<!rec_D>
+// CIR: %[[THIS_ADDR:.*]] = cir.alloca {{.*}} ["this", init]
+// CIR: cir.store %[[THIS_ARG]], %[[THIS_ADDR]]
+// CIR: %[[THIS:.*]] = cir.load %[[THIS_ADDR]]
+// CIR: %[[A_ADDR:.*]] = cir.base_class_addr %[[THIS]] : !cir.ptr<!rec_D> nonnull [40] -> !cir.ptr<!rec_A>
+// CIR: cir.call @_ZN1AC2Ev(%[[A_ADDR]]) nothrow : (!cir.ptr<!rec_A>) -> ()
+// CIR: %[[B_ADDR:.*]] = cir.base_class_addr %[[THIS]] : !cir.ptr<!rec_D> nonnull [0] -> !cir.ptr<!rec_B>
+// CIR: %[[B_VTT:.*]] = cir.vtt.address_point @_ZTT1D, offset = 1 -> !cir.ptr<!cir.ptr<!void>>
+// CIR: cir.call @_ZN1BC2Ev(%[[B_ADDR]], %[[B_VTT]]) nothrow : (!cir.ptr<!rec_B>, !cir.ptr<!cir.ptr<!void>>) -> ()
+// CIR: %[[C_ADDR:.*]] = cir.base_class_addr %[[THIS]] : !cir.ptr<!rec_D> nonnull [16] -> !cir.ptr<!rec_C>
+// CIR: %[[C_VTT:.*]] = cir.vtt.address_point @_ZTT1D, offset = 3 -> !cir.ptr<!cir.ptr<!void>>
+// CIR: cir.call @_ZN1CC2Ev(%[[C_ADDR]], %[[C_VTT]]) nothrow : (!cir.ptr<!rec_C>, !cir.ptr<!cir.ptr<!void>>) -> ()
+// CIR: %[[D_VPTR:.*]] = cir.vtable.address_point(@_ZTV1D, address_point = <index = 0, offset = 3>) : !cir.vptr
+// CIR: %[[VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[THIS]] : !cir.ptr<!rec_D> -> !cir.ptr<!cir.vptr>
+// CIR: cir.store{{.*}} %[[D_VPTR]], %[[VPTR_ADDR]] : !cir.vptr, !cir.ptr<!cir.vptr>
+// CIR: %[[A_VPTR:.*]] = cir.vtable.address_point(@_ZTV1D, address_point = <index = 2, offset = 3>) : !cir.vptr
+// CIR: %[[A_ADDR:.*]] = cir.base_class_addr %[[THIS]] : !cir.ptr<!rec_D> nonnull [40] -> !cir.ptr<!rec_A>
+// CIR: %[[A_VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[A_ADDR]] : !cir.ptr<!rec_A> -> !cir.ptr<!cir.vptr>
+// CIR: cir.store{{.*}} %[[A_VPTR]], %[[A_VPTR_ADDR]] : !cir.vptr, !cir.ptr<!cir.vptr>
+// CIR: %[[C_VPTR:.*]] = cir.vtable.address_point(@_ZTV1D, address_point = <index = 1, offset = 3>) : !cir.vptr
+// CIR: %[[C_ADDR:.*]] = cir.base_class_addr %[[THIS]] : !cir.ptr<!rec_D> nonnull [16] -> !cir.ptr<!rec_C>
+// CIR: %[[C_VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[C_ADDR]] : !cir.ptr<!rec_C> -> !cir.ptr<!cir.vptr>
+// CIR: cir.store{{.*}} %[[C_VPTR]], %[[C_VPTR_ADDR]] : !cir.vptr, !cir.ptr<!cir.vptr>
+
+// LLVM: define {{.*}} void @_ZN1DC1Ev(ptr %[[THIS_ARG:.*]])
+// LLVM: %[[THIS_ADDR:.*]] = alloca ptr
+// LLVM: store ptr %[[THIS_ARG]], ptr %[[THIS_ADDR]]
+// LLVM: %[[THIS:.*]] = load ptr, ptr %[[THIS_ADDR]]
+// LLVM: %[[A_ADDR:.*]] = getelementptr i8, ptr %[[THIS]], i32 40
+// LLVM: call void @_ZN1AC2Ev(ptr %[[A_ADDR]])
+// LLVM: call void @_ZN1BC2Ev(ptr %[[THIS]], ptr getelementptr inbounds nuw (i8, ptr @_ZTT1D, i64 8))
+// LLVM: %[[C_ADDR:.*]] = getelementptr i8, ptr %[[THIS]], i32 16
+// LLVM: call void @_ZN1CC2Ev(ptr %[[C_ADDR]], ptr getelementptr inbounds nuw (i8, ptr @_ZTT1D, i64 24))
+// LLVM: store ptr getelementptr inbounds nuw (i8, ptr @_ZTV1D, i64 24), ptr %[[THIS]]
+// LLVM: %[[A_ADDR:.*]] = getelementptr i8, ptr %[[THIS]], i32 40
+// LLVM: store ptr getelementptr inbounds nuw (i8, ptr @_ZTV1D, i64 96), ptr %[[A_ADDR]]
+// LLVM: %[[C_ADDR:.*]] = getelementptr i8, ptr %[[THIS]], i32 16
+// LLVM: store ptr getelementptr inbounds nuw (i8, ptr @_ZTV1D, i64 64), ptr %[[C_ADDR]]
+
+// OGCG: define {{.*}} void @_ZN1DC1Ev(ptr {{.*}} %[[THIS_ARG:.*]])
+// OGCG: %[[THIS_ADDR:.*]] = alloca ptr
+// OGCG: store ptr %[[THIS_ARG]], ptr %[[THIS_ADDR]]
+// OGCG: %[[THIS:.*]] = load ptr, ptr %[[THIS_ADDR]]
+// OGCG: %[[A_ADDR:.*]] = getelementptr inbounds i8, ptr %[[THIS]], i64 40
+// OGCG: call void @_ZN1AC2Ev(ptr {{.*}} %[[A_ADDR]])
+// OGCG: call void @_ZN1BC2Ev(ptr {{.*}} %[[THIS]], ptr {{.*}} getelementptr inbounds ([7 x ptr], ptr @_ZTT1D, i64 0, i64 1))
+// OGCG: %[[C_ADDR:.*]] = getelementptr inbounds i8, ptr %[[THIS]], i64 16
+// OGCG: call void @_ZN1CC2Ev(ptr {{.*}} %[[C_ADDR]], ptr {{.*}} getelementptr inbounds ([7 x ptr], ptr @_ZTT1D, i64 0, i64 3))
+// OGCG: store ptr getelementptr inbounds inrange(-24, 16) ({ [5 x ptr], [4 x ptr], [4 x ptr] }, ptr @_ZTV1D, i32 0, i32 0, i32 3), ptr %[[THIS]]
+// OGCG: %[[A_ADDR:.*]] = getelementptr inbounds i8, ptr %[[THIS]], i64 40
+// OGCG: store ptr getelementptr inbounds inrange(-24, 8) ({ [5 x ptr], [4 x ptr], [4 x ptr] }, ptr @_ZTV1D, i32 0, i32 2, i32 3), ptr %[[A_ADDR]]
+// OGCG: %[[C_ADDR:.*]] = getelementptr inbounds i8, ptr %[[THIS]], i64 16
+// OGCG: store ptr getelementptr inbounds inrange(-24, 8) ({ [5 x ptr], [4 x ptr], [4 x ptr] }, ptr @_ZTV1D, i32 0, i32 1, i32 3), ptr %[[C_ADDR]]
+
+// OGCG: define {{.*}} void @_ZN1AC2Ev(ptr {{.*}} %[[THIS_ARG:.*]])
+// OGCG: %[[THIS_ADDR:.*]] = alloca ptr
+// OGCG: store ptr %[[THIS_ARG]], ptr %[[THIS_ADDR]]
+// OGCG: %[[THIS:.*]] = load ptr, ptr %[[THIS_ADDR]]
+// OGCG: store ptr getelementptr inbounds inrange(-16, 8) ({ [3 x ptr] }, ptr @_ZTV1A, i32 0, i32 0, i32 2), ptr %[[THIS]]
More information about the cfe-commits
mailing list