[cfe-commits] r81143 - in /cfe/trunk: lib/CodeGen/CGCXX.cpp lib/CodeGen/Mangle.cpp lib/CodeGen/Mangle.h test/CodeGenCXX/virt.cpp
Mike Stump
mrs at apple.com
Sun Sep 6 21:27:52 PDT 2009
Author: mrs
Date: Sun Sep 6 23:27:52 2009
New Revision: 81143
URL: http://llvm.org/viewvc/llvm-project?rev=81143&view=rev
Log:
Refine vcall offsets. Cleanups. WIP.
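
Background, as a rough sketch (the hierarchy below is illustrative only; the
patch's own test case is test13 in test/CodeGenCXX/virt.cpp): vcall offsets
matter when a method of a virtual base is overridden further down the
hierarchy. The distance from the virtual base subobject to the final
overrider is not known statically, so the vtable fragment for the virtual
base stores that distance as a vcall offset, and the slot for the overridden
method holds a thunk that loads the offset and adjusts 'this' before jumping
to the overrider.

    struct B { virtual void f() { } int i; };
    struct D : virtual B { virtual void f() { } };  // D::f overrides B::f
    // In the B-in-D vtable fragment, the slot for f holds a virtual thunk
    // (a name of the form _ZTv0_n24_..., exact offsets depend on layout)
    // that reads the vcall offset out of the vtable and converts the B* it
    // was handed into a D* before tail-calling D::f.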
Modified:
cfe/trunk/lib/CodeGen/CGCXX.cpp
cfe/trunk/lib/CodeGen/Mangle.cpp
cfe/trunk/lib/CodeGen/Mangle.h
cfe/trunk/test/CodeGenCXX/virt.cpp
Modified: cfe/trunk/lib/CodeGen/CGCXX.cpp
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/CodeGen/CGCXX.cpp?rev=81143&r1=81142&r2=81143&view=diff
==============================================================================
--- cfe/trunk/lib/CodeGen/CGCXX.cpp (original)
+++ cfe/trunk/lib/CodeGen/CGCXX.cpp Sun Sep 6 23:27:52 2009
@@ -909,9 +909,7 @@
}
bool OverrideMethod(const CXXMethodDecl *MD, llvm::Constant *m,
- bool MorallyVirtual, Index_t Offset,
- std::vector<llvm::Constant *> &submethods,
- Index_t AddressPoint) {
+ bool MorallyVirtual, Index_t Offset) {
typedef CXXMethodDecl::method_iterator meth_iter;
// FIXME: Don't like the nested loops. For very large inheritance
@@ -927,23 +925,25 @@
om = CGM.GetAddrOfFunction(GlobalDecl(OMD), Ptr8Ty);
om = llvm::ConstantExpr::getBitCast(om, Ptr8Ty);
- for (Index_t i = AddressPoint, e = submethods.size();
+ for (Index_t i = 0, e = submethods.size();
i != e; ++i) {
// FIXME: begin_overridden_methods might be too lax, covariance */
if (submethods[i] != om)
continue;
+ Index[MD] = i;
submethods[i] = m;
- Index[MD] = i - AddressPoint;
Thunks.erase(OMD);
if (MorallyVirtual) {
- VCallOffset[MD] = Offset/8;
Index_t &idx = VCall[OMD];
if (idx == 0) {
+ VCallOffset[MD] = Offset/8;
idx = VCalls.size()+1;
VCalls.push_back(0);
+ } else {
+ VCallOffset[MD] = VCallOffset[OMD];
+ VCalls[idx-1] = -VCallOffset[OMD] + Offset/8;
}
- VCalls[idx] = Offset/8 - VCallOffset[OMD];
VCall[MD] = idx;
// FIXME: 0?
Thunks[MD] = std::make_pair(0, -((idx+extra+2)*LLVMPointerWidth/8));
@@ -975,18 +975,20 @@
Thunks.clear();
}
- void OverrideMethods(std::vector<const CXXRecordDecl *> *Path,
- bool MorallyVirtual, Index_t Offset) {
- for (std::vector<const CXXRecordDecl *>::reverse_iterator i =Path->rbegin(),
+ void OverrideMethods(std::vector<std::pair<const CXXRecordDecl *,
+ int64_t> > *Path, bool MorallyVirtual) {
+ for (std::vector<std::pair<const CXXRecordDecl *,
+ int64_t> >::reverse_iterator i =Path->rbegin(),
e = Path->rend(); i != e; ++i) {
- const CXXRecordDecl *RD = *i;
+ const CXXRecordDecl *RD = i->first;
+ int64_t Offset = i->second;
for (method_iter mi = RD->method_begin(), me = RD->method_end(); mi != me;
++mi)
if (mi->isVirtual()) {
const CXXMethodDecl *MD = *mi;
llvm::Constant *m = wrap(CGM.GetAddrOfFunction(GlobalDecl(MD),
Ptr8Ty));
- OverrideMethod(MD, m, MorallyVirtual, Offset, submethods, 0);
+ OverrideMethod(MD, m, MorallyVirtual, Offset);
}
}
}
@@ -994,11 +996,12 @@
void AddMethod(const CXXMethodDecl *MD, bool MorallyVirtual, Index_t Offset) {
llvm::Constant *m = wrap(CGM.GetAddrOfFunction(GlobalDecl(MD), Ptr8Ty));
// If we can find a previously allocated slot for this, reuse it.
- if (OverrideMethod(MD, m, MorallyVirtual, Offset, submethods, 0))
+ if (OverrideMethod(MD, m, MorallyVirtual, Offset))
return;
// else allocate a new slot.
Index[MD] = submethods.size();
+ submethods.push_back(m);
if (MorallyVirtual) {
VCallOffset[MD] = Offset/8;
Index_t &idx = VCall[MD];
@@ -1008,7 +1011,6 @@
VCalls.push_back(0);
}
}
- submethods.push_back(m);
}
void AddMethods(const CXXRecordDecl *RD, bool MorallyVirtual,
@@ -1032,8 +1034,9 @@
if (Base != PrimaryBase || PrimaryBaseWasVirtual) {
uint64_t o = Offset + Layout.getBaseClassOffset(Base);
StartNewTable();
- std::vector<const CXXRecordDecl *> S;
- S.push_back(RD);
+ std::vector<std::pair<const CXXRecordDecl *,
+ int64_t> > S;
+ S.push_back(std::make_pair(RD, Offset));
GenerateVtableForBase(Base, MorallyVirtual, o, false, &S);
}
}
@@ -1055,8 +1058,9 @@
}
// The vcalls come first...
- for (std::vector<Index_t>::iterator i=VCalls.begin(), e=VCalls.end();
- i < e; ++i)
+ for (std::vector<Index_t>::reverse_iterator i=VCalls.rbegin(),
+ e=VCalls.rend();
+ i != e; ++i)
methods.push_back(wrap((0?600:0) + *i));
VCalls.clear();
@@ -1103,7 +1107,8 @@
int64_t GenerateVtableForBase(const CXXRecordDecl *RD,
bool MorallyVirtual = false, int64_t Offset = 0,
bool ForVirtualBase = false,
- std::vector<const CXXRecordDecl *> *Path = 0) {
+ std::vector<std::pair<const CXXRecordDecl *,
+ int64_t> > *Path = 0) {
if (!RD->isDynamicClass())
return 0;
@@ -1128,22 +1133,25 @@
AddMethods(RD, MorallyVirtual, Offset);
if (Path)
- OverrideMethods(Path, MorallyVirtual, Offset);
+ OverrideMethods(Path, MorallyVirtual);
return end(RD, offsets, Layout, PrimaryBase, PrimaryBaseWasVirtual,
MorallyVirtual, Offset, ForVirtualBase);
}
void GenerateVtableForVBases(const CXXRecordDecl *RD,
- std::vector<const CXXRecordDecl *> *Path = 0) {
+ int64_t Offset = 0,
+ std::vector<std::pair<const CXXRecordDecl *,
+ int64_t> > *Path = 0) {
bool alloc = false;
if (Path == 0) {
alloc = true;
- Path = new std::vector<const CXXRecordDecl *>;
+ Path = new std::vector<std::pair<const CXXRecordDecl *,
+ int64_t> >;
}
// FIXME: We also need to override using all paths to a virtual base,
// right now, we just process the first path
- Path->push_back(RD);
+ Path->push_back(std::make_pair(RD, Offset));
for (CXXRecordDecl::base_class_const_iterator i = RD->bases_begin(),
e = RD->bases_end(); i != e; ++i) {
const CXXRecordDecl *Base =
@@ -1155,8 +1163,11 @@
int64_t BaseOffset = BLayout.getVBaseClassOffset(Base);
GenerateVtableForBase(Base, true, BaseOffset, true, Path);
}
+ int64_t BaseOffset = Offset;
+ if (i->isVirtual())
+ BaseOffset = BLayout.getVBaseClassOffset(Base);
if (Base->getNumVBases())
- GenerateVtableForVBases(Base, Path);
+ GenerateVtableForVBases(Base, BaseOffset, Path);
}
Path->pop_back();
if (alloc)
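
Summarizing the CGCXX.cpp changes above: the override path now carries a
per-class offset (std::pair<const CXXRecordDecl *, int64_t>) instead of
relying on a single Offset argument, the vcall entries are emitted with a
reverse_iterator, and when a vcall slot already exists for an overridden
method the slot is refreshed from the overrider's position. A hypothetical
worked example of that last computation (numbers invented purely for
illustration; offsets arrive in bits, hence the division by 8):

    // method introduced at byte offset 8, final overrider's subobject at 0:
    //   VCallOffset[OMD] == 8, Offset == 0
    VCalls[idx-1] = -VCallOffset[OMD] + Offset/8;   // -8 + 0 == -8
    // later emitted as the unsigned .quad 18446744073709551608 on LP64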
Modified: cfe/trunk/lib/CodeGen/Mangle.cpp
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/CodeGen/Mangle.cpp?rev=81143&r1=81142&r2=81143&view=diff
==============================================================================
--- cfe/trunk/lib/CodeGen/Mangle.cpp (original)
+++ cfe/trunk/lib/CodeGen/Mangle.cpp Sun Sep 6 23:27:52 2009
@@ -41,8 +41,8 @@
bool mangle(const NamedDecl *D);
void mangleCalloffset(int64_t nv, int64_t v);
- void mangleThunk(const NamedDecl *ND, int64_t nv, int64_t v);
- void mangleCovariantThunk(const NamedDecl *ND,
+ void mangleThunk(const FunctionDecl *FD, int64_t nv, int64_t v);
+ void mangleCovariantThunk(const FunctionDecl *FD,
int64_t nv_t, int64_t v_t,
int64_t nv_r, int64_t v_r);
void mangleGuardVariable(const VarDecl *D);
@@ -274,25 +274,26 @@
Out << "_";
}
-void CXXNameMangler::mangleThunk(const NamedDecl *D, int64_t nv, int64_t v) {
+void CXXNameMangler::mangleThunk(const FunctionDecl *FD, int64_t nv,
+ int64_t v) {
// <special-name> ::= T <call-offset> <base encoding>
// # base is the nominal target function of thunk
- Out << "_T";
+ Out << "_ZT";
mangleCalloffset(nv, v);
- mangleName(D);
+ mangleFunctionEncoding(FD);
}
- void CXXNameMangler::mangleCovariantThunk(const NamedDecl *D,
+ void CXXNameMangler::mangleCovariantThunk(const FunctionDecl *FD,
int64_t nv_t, int64_t v_t,
int64_t nv_r, int64_t v_r) {
// <special-name> ::= Tc <call-offset> <call-offset> <base encoding>
// # base is the nominal target function of thunk
// # first call-offset is 'this' adjustment
// # second call-offset is result adjustment
- Out << "_Tc";
+ Out << "_ZTc";
mangleCalloffset(nv_t, v_t);
mangleCalloffset(nv_r, v_r);
- mangleName(D);
+ mangleFunctionEncoding(FD);
}
void CXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND) {
@@ -894,32 +895,32 @@
/// \brief Mangles the a thunk with the offset n for the declaration D and
/// emits that name to the given output stream.
- void mangleThunk(const NamedDecl *D, int64_t nv, int64_t v,
+ void mangleThunk(const FunctionDecl *FD, int64_t nv, int64_t v,
ASTContext &Context, llvm::raw_ostream &os) {
// FIXME: Hum, we might have to thunk these, fix.
- assert(!isa<CXXConstructorDecl>(D) &&
+ assert(!isa<CXXConstructorDecl>(FD) &&
"Use mangleCXXCtor for constructor decls!");
- assert(!isa<CXXDestructorDecl>(D) &&
+ assert(!isa<CXXDestructorDecl>(FD) &&
"Use mangleCXXDtor for destructor decls!");
CXXNameMangler Mangler(Context, os);
- Mangler.mangleThunk(D, nv, v);
+ Mangler.mangleThunk(FD, nv, v);
os.flush();
}
/// \brief Mangles the a covariant thunk for the declaration D and emits that
/// name to the given output stream.
- void mangleCovariantThunk(const NamedDecl *D, int64_t nv_t, int64_t v_t,
+ void mangleCovariantThunk(const FunctionDecl *FD, int64_t nv_t, int64_t v_t,
int64_t nv_r, int64_t v_r, ASTContext &Context,
llvm::raw_ostream &os) {
// FIXME: Hum, we might have to thunk these, fix.
- assert(!isa<CXXConstructorDecl>(D) &&
+ assert(!isa<CXXConstructorDecl>(FD) &&
"Use mangleCXXCtor for constructor decls!");
- assert(!isa<CXXDestructorDecl>(D) &&
+ assert(!isa<CXXDestructorDecl>(FD) &&
"Use mangleCXXDtor for destructor decls!");
CXXNameMangler Mangler(Context, os);
- Mangler.mangleCovariantThunk(D, nv_t, v_t, nv_r, v_r);
+ Mangler.mangleCovariantThunk(FD, nv_t, v_t, nv_r, v_r);
os.flush();
}
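
For the Mangle.cpp changes above: thunk names previously began with a bare
"_T"/"_Tc" and ended with only the target's name; they now carry the proper
"_Z" prefix and the full function encoding, which is what the Itanium C++ ABI
grammar quoted in the comments calls for:

    <mangled-name> ::= _Z <encoding>
    <special-name> ::= T <call-offset> <base encoding>                 # thunk
    <special-name> ::= Tc <call-offset> <call-offset> <base encoding>  # covariant thunk
    <call-offset>  ::= h <nv-offset> _    # non-virtual 'this' adjustment
    <call-offset>  ::= v <v-offset> _     # 'this' adjustment via vcall offset

So a virtual thunk to test13_D::D() shows up as __ZTv0_n48_N8test13_D1DEv in
the LP64 test expectations below (the extra leading underscore is the Darwin
assembler's global-symbol prefix).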
Modified: cfe/trunk/lib/CodeGen/Mangle.h
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/CodeGen/Mangle.h?rev=81143&r1=81142&r2=81143&view=diff
==============================================================================
--- cfe/trunk/lib/CodeGen/Mangle.h (original)
+++ cfe/trunk/lib/CodeGen/Mangle.h Sun Sep 6 23:27:52 2009
@@ -29,12 +29,13 @@
class ASTContext;
class CXXConstructorDecl;
class CXXDestructorDecl;
+ class FunctionDecl;
class NamedDecl;
class VarDecl;
bool mangleName(const NamedDecl *D, ASTContext &Context,
llvm::raw_ostream &os);
- void mangleThunk(const NamedDecl *D, int64_t n, int64_t vn,
+ void mangleThunk(const FunctionDecl *FD, int64_t n, int64_t vn,
ASTContext &Context, llvm::raw_ostream &os);
void mangleCovariantThunk(const NamedDecl *D, bool VirtualThis, int64_t nv_t,
int64_t v_t, bool VirtualResult, int64_t nv_r,
Modified: cfe/trunk/test/CodeGenCXX/virt.cpp
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGenCXX/virt.cpp?rev=81143&r1=81142&r2=81143&view=diff
==============================================================================
--- cfe/trunk/test/CodeGenCXX/virt.cpp (original)
+++ cfe/trunk/test/CodeGenCXX/virt.cpp Sun Sep 6 23:27:52 2009
@@ -722,6 +722,125 @@
// CHECK-LP64-NEXT: .quad __ZN8test11_D2D1Ev
// CHECK-LP64-NEXT: .quad __ZN8test11_D2D2Ev
+struct test13_B {
+ virtual void B1() { }
+ virtual void D() { }
+ virtual void Da();
+ virtual void Db() { }
+ virtual void Dc() { }
+ virtual void B2() { }
+ int i;
+};
+
+
+struct test13_NV1 {
+ virtual void fooNV1() { }
+ virtual void D() { }
+};
+
+
+struct test13_B2 : /* test13_NV1, */ virtual test13_B {
+ virtual void B2a() { }
+ virtual void B2() { }
+ virtual void D() { }
+ virtual void Da();
+ virtual void Dd() { }
+ virtual void B2b() { }
+ int i;
+};
+
+
+struct test13_D : test13_NV1, virtual test13_B2 {
+ virtual void D1() { }
+ virtual void D() { }
+ virtual void Db() { }
+ virtual void Dd() { }
+ virtual void D2() { }
+ virtual void fooNV1() { }
+} test13_d;
+
+// CHECK-LP64:__ZTV8test13_D:
+// CHECK-LP64-NEXT: .quad 24
+// CHECK-LP64-NEXT: .quad 8
+// CHECK-LP64-NEXT: .space 8
+// CHECK-LP64-NEXT: .quad __ZTI8test13_D
+// CHECK-LP64-NEXT: .quad __ZN8test13_D6fooNV1Ev
+// CHECK-LP64-NEXT: .quad __ZN8test13_D1DEv
+// CHECK-LP64-NEXT: .quad __ZN8test13_D2D1Ev
+// CHECK-LP64-NEXT: .quad __ZN8test13_D2DbEv
+// CHECK-LP64-NEXT: .quad __ZN8test13_D2DdEv
+// CHECK-LP64-NEXT: .quad __ZN8test13_D2D2Ev
+// CHECK-LP64-NEXT: .space 8
+// CHECK-LP64-NEXT: .quad 18446744073709551608
+// CHECK-LP64-NEXT: .space 8
+// CHECK-LP64-NEXT: .quad 18446744073709551608
+// CHECK-LP64-NEXT: .space 8
+// CHECK-LP64-NEXT: .space 8
+// CHECK-LP64-NEXT: .quad 16
+// CHECK-LP64-NEXT: .quad 18446744073709551608
+// CHECK-LP64-NEXT: .quad __ZTI8test13_D
+// CHECK-LP64-NEXT: .quad __ZN9test13_B23B2aEv
+// CHECK-LP64-NEXT: .quad __ZN9test13_B22B2Ev
+// CHECK-LP64-NEXT: .quad __ZTv0_n48_N8test13_D1DEv
+// CHECK-LP64-NEXT: .quad __ZN9test13_B22DaEv
+// CHECK-LP64-NEXT: .quad __ZTv0_n64_N8test13_D2DdEv
+// CHECK-LP64-NEXT: .quad __ZN9test13_B23B2bEv
+// CHECK-LP64-NEXT: .quad 18446744073709551600
+// CHECK-LP64-NEXT: .space 8
+// CHECK-LP64-NEXT: .quad 18446744073709551592
+// CHECK-LP64-NEXT: .quad 18446744073709551600
+// CHECK-LP64-NEXT: .quad 18446744073709551592
+// CHECK-LP64-NEXT: .space 8
+// CHECK-LP64-NEXT: .quad 18446744073709551592
+// CHECK-LP64-NEXT: .quad __ZTI8test13_D
+// CHECK-LP64-NEXT: .quad __ZN8test13_B2B1Ev
+// CHECK-LP64-NEXT: .quad __ZTv0_n32_N8test13_D1DEv
+// CHECK-LP64-NEXT: .quad __ZTv0_n40_N9test13_B22DaEv
+// CHECK-LP64-NEXT: .quad __ZTv0_n48_N8test13_D2DbEv
+// CHECK-LP64-NEXT: .quad __ZN8test13_B2DcEv
+// CHECK-LP64-NEXT: .quad __ZTv0_n64_N9test13_B22B2Ev
+
+// CHECK-LP32:__ZTV8test13_D:
+// CHECK-LP32-NEXT: .long 12
+// CHECK-LP32-NEXT: .long 4
+// CHECK-LP32-NEXT: .space 4
+// CHECK-LP32-NEXT: .long __ZTI8test13_D
+// CHECK-LP32-NEXT: .long __ZN8test13_D6fooNV1Ev
+// CHECK-LP32-NEXT: .long __ZN8test13_D1DEv
+// CHECK-LP32-NEXT: .long __ZN8test13_D2D1Ev
+// CHECK-LP32-NEXT: .long __ZN8test13_D2DbEv
+// CHECK-LP32-NEXT: .long __ZN8test13_D2DdEv
+// CHECK-LP32-NEXT: .long __ZN8test13_D2D2Ev
+// CHECK-LP32-NEXT: .space 4
+// CHECK-LP32-NEXT: .long 4294967292
+// CHECK-LP32-NEXT: .space 4
+// CHECK-LP32-NEXT: .long 4294967292
+// CHECK-LP32-NEXT: .space 4
+// CHECK-LP32-NEXT: .space 4
+// CHECK-LP32-NEXT: .long 8
+// CHECK-LP32-NEXT: .long 4294967292
+// CHECK-LP32-NEXT: .long __ZTI8test13_D
+// CHECK-LP32-NEXT: .long __ZN9test13_B23B2aEv
+// CHECK-LP32-NEXT: .long __ZN9test13_B22B2Ev
+// CHECK-LP32-NEXT: .long __ZTv0_n24_N8test13_D1DEv
+// CHECK-LP32-NEXT: .long __ZN9test13_B22DaEv
+// CHECK-LP32-NEXT: .long __ZTv0_n32_N8test13_D2DdEv
+// CHECK-LP32-NEXT: .long __ZN9test13_B23B2bEv
+// CHECK-LP32-NEXT: .long 4294967288
+// CHECK-LP32-NEXT: .space 4
+// CHECK-LP32-NEXT: .long 4294967284
+// CHECK-LP32-NEXT: .long 4294967288
+// CHECK-LP32-NEXT: .long 4294967284
+// CHECK-LP32-NEXT: .space 4
+// CHECK-LP32-NEXT: .long 4294967284
+// CHECK-LP32-NEXT: .long __ZTI8test13_D
+// CHECK-LP32-NEXT: .long __ZN8test13_B2B1Ev
+// CHECK-LP32-NEXT: .long __ZTv0_n16_N8test13_D1DEv
+// CHECK-LP32-NEXT: .long __ZTv0_n20_N9test13_B22DaEv
+// CHECK-LP32-NEXT: .long __ZTv0_n24_N8test13_D2DbEv
+// CHECK-LP32-NEXT: .long __ZN8test13_B2DcEv
+// CHECK-LP32-NEXT: .long __ZTv0_n32_N9test13_B22B2Ev
+
// CHECK-LP64: __ZTV1B:
// CHECK-LP64-NEXT: .space 8
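
A note on reading the CHECK lines in the test above: the assembler prints the
negative offsets (offset-to-top, vbase and vcall offsets) as unsigned
integers, so the large constants are just two's-complement encodings, e.g.

    18446744073709551608 == 2^64 - 8   == -8    (.quad, LP64)
    18446744073709551592 == 2^64 - 24  == -24
    4294967292           == 2^32 - 4   == -4    (.long, LP32)
    4294967288           == 2^32 - 8   == -8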