r175389 - Re-apply r174919 - smarter copy/move assignment/construction, with fixes for
Chandler Carruth
chandlerc at google.com
Mon Feb 25 05:09:10 PST 2013
Hey Lang,
This still appears to be causing trouble, although perhaps now due to a
latent bug in the backend: http://llvm.org/PR15348 breaks the i386 *non*-
optimized bootstrap.
Also, something I saw here looked really strange:
On Sat, Feb 16, 2013 at 11:22 PM, Lang Hames <lhames at gmail.com> wrote:
>
> +
> + unsigned FirstFieldAlign = ~0U; // Set to invalid.
> +
> + if (FirstField->isBitField()) {
> + const CGRecordLayout &RL =
> + CGF.getTypes().getCGRecordLayout(FirstField->getParent());
> + const CGBitFieldInfo &BFInfo = RL.getBitFieldInfo(FirstField);
> + FirstFieldAlign = BFInfo.StorageAlignment;
> + } else
> + FirstFieldAlign = CGF.getContext().getTypeAlign(FirstField->getType());
> +
> + assert(FirstFieldOffset % FirstFieldAlign == 0 && "Bad field alignment.");
> + CharUnits Alignment =
> + CGF.getContext().toCharUnitsFromBits(FirstFieldAlign);
>
While ASTContext::getTypeAlign operates in bits, isn't
CGBitFieldInfo::StorageAlignment in terms of char units? See
CGRecordLayoutBuilder.cpp:285.
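If I'm reading that right, the mixed units would bite twice: the assert
compares FirstFieldOffset (a bit offset) against a char-unit alignment, and
toCharUnitsFromBits(StorageAlignment) would then divide an already-char-unit
value by the char width. One way to make the two branches consistent would
be to compute a CharUnits alignment directly in both. A rough, untested
sketch against my reading of the current APIs (treat the exact calls as
assumptions on my part):

    // Sketch only: assumes CGBitFieldInfo::StorageAlignment really is in
    // char units, per CGRecordLayoutBuilder.cpp:285.
    CharUnits Alignment;
    if (FirstField->isBitField()) {
      const CGRecordLayout &RL =
        CGF.getTypes().getCGRecordLayout(FirstField->getParent());
      const CGBitFieldInfo &BFInfo = RL.getBitFieldInfo(FirstField);
      Alignment = CharUnits::fromQuantity(BFInfo.StorageAlignment);
    } else {
      Alignment = CGF.getContext().getTypeAlignInChars(FirstField->getType());
    }
    assert(FirstFieldOffset % CGF.getContext().toBits(Alignment) == 0 &&
           "Bad field alignment.");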
I think the pod-member-memcpys test could be beefed up on the bitfield
front -- it doesn't actually test the case where the first field is a
bitfield. Maybe that's tested elsewhere.
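If not, something along these lines could be added alongside the other
structs (hypothetical and untested; the name BitfieldMember2 is made up,
and it reuses the CALL_AO/CALL_CC macros from the test):

    struct BitfieldMember2 {
      unsigned a : 16; // First field is a bitfield.
      int b, c, d;
      NonPOD np;
      int w, x, y, z;
    };

    CALL_AO(BitfieldMember2)
    CALL_CC(BitfieldMember2)

That would exercise the FirstField->isBitField() path above.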
> + CharUnits MemcpySize = getMemcpySize();
> + QualType RecordTy = CGF.getContext().getTypeDeclType(ClassDecl);
> + llvm::Value *ThisPtr = CGF.LoadCXXThis();
> + LValue DestLV = CGF.MakeNaturalAlignAddrLValue(ThisPtr, RecordTy);
> + LValue Dest = CGF.EmitLValueForFieldInitialization(DestLV, FirstField);
> + llvm::Value *SrcPtr = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(SrcRec));
> + LValue SrcLV = CGF.MakeNaturalAlignAddrLValue(SrcPtr, RecordTy);
> + LValue Src = CGF.EmitLValueForFieldInitialization(SrcLV, FirstField);
> +
> + emitMemcpyIR(Dest.isBitField() ? Dest.getBitFieldAddr() : Dest.getAddress(),
> + Src.isBitField() ? Src.getBitFieldAddr() : Src.getAddress(),
> + MemcpySize, Alignment);
> + reset();
> + }
> +
> + void reset() {
> + FirstField = 0;
> + }
> +
> + protected:
> + CodeGenFunction &CGF;
> + const CXXRecordDecl *ClassDecl;
> +
> + private:
> +
> + void emitMemcpyIR(llvm::Value *DestPtr, llvm::Value *SrcPtr,
> + CharUnits Size, CharUnits Alignment) {
> + llvm::PointerType *DPT = cast<llvm::PointerType>(DestPtr->getType());
> + llvm::Type *DBP =
> + llvm::Type::getInt8PtrTy(CGF.getLLVMContext(), DPT->getAddressSpace());
> + DestPtr = CGF.Builder.CreateBitCast(DestPtr, DBP);
> +
> + llvm::PointerType *SPT = cast<llvm::PointerType>(SrcPtr->getType());
> + llvm::Type *SBP =
> + llvm::Type::getInt8PtrTy(CGF.getLLVMContext(), SPT->getAddressSpace());
> + SrcPtr = CGF.Builder.CreateBitCast(SrcPtr, SBP);
> +
> + CGF.Builder.CreateMemCpy(DestPtr, SrcPtr, Size.getQuantity(),
> + Alignment.getQuantity());
> + }
> +
> + void addInitialField(FieldDecl *F) {
> + FirstField = F;
> + LastField = F;
> + FirstFieldOffset = RecLayout.getFieldOffset(F->getFieldIndex());
> + LastFieldOffset = FirstFieldOffset;
> + LastAddedFieldIndex = F->getFieldIndex();
> + return;
> + }
> +
> + void addNextField(FieldDecl *F) {
> + assert(F->getFieldIndex() == LastAddedFieldIndex + 1 &&
> + "Cannot aggregate non-contiguous fields.");
> + LastAddedFieldIndex = F->getFieldIndex();
> +
> + // The 'first' and 'last' fields are chosen by offset, rather than field
> + // index. This allows the code to support bitfields, as well as regular
> + // fields.
> + uint64_t FOffset = RecLayout.getFieldOffset(F->getFieldIndex());
> + if (FOffset < FirstFieldOffset) {
> + FirstField = F;
> + FirstFieldOffset = FOffset;
> + } else if (FOffset > LastFieldOffset) {
> + LastField = F;
> + LastFieldOffset = FOffset;
> + }
> + }
> +
> + const VarDecl *SrcRec;
> + const ASTRecordLayout &RecLayout;
> + FieldDecl *FirstField;
> + FieldDecl *LastField;
> + uint64_t FirstFieldOffset, LastFieldOffset;
> + unsigned LastAddedFieldIndex;
> + };
> +
> + class ConstructorMemcpyizer : public FieldMemcpyizer {
> + private:
> +
> + /// Get source argument for copy constructor. Returns null if not a copy
> + /// constructor.
> + static const VarDecl* getTrivialCopySource(const CXXConstructorDecl *CD,
> + FunctionArgList &Args) {
> + if (CD->isCopyOrMoveConstructor() && CD->isImplicitlyDefined())
> + return Args[Args.size() - 1];
> + return 0;
> + }
> +
> + // Returns true if a CXXCtorInitializer represents a member initialization
> + // that can be rolled into a memcpy.
> + bool isMemberInitMemcpyable(CXXCtorInitializer *MemberInit) const {
> + if (!MemcpyableCtor)
> + return false;
> + FieldDecl *Field = MemberInit->getMember();
> + assert(Field != 0 && "No field for member init.");
> + QualType FieldType = Field->getType();
> + CXXConstructExpr *CE = dyn_cast<CXXConstructExpr>(MemberInit->getInit());
> +
> + // Bail out on non-POD, non-trivially-constructible members.
> + if (!(CE && CE->getConstructor()->isTrivial()) &&
> + !(FieldType.isTriviallyCopyableType(CGF.getContext()) ||
> + FieldType->isReferenceType()))
> + return false;
> +
> + // Bail out on volatile fields.
> + if (!isMemcpyableField(Field))
> + return false;
> +
> + // Otherwise we're good.
> + return true;
> + }
> +
> + public:
> + ConstructorMemcpyizer(CodeGenFunction &CGF, const CXXConstructorDecl *CD,
> + FunctionArgList &Args)
> + : FieldMemcpyizer(CGF, CD->getParent(), getTrivialCopySource(CD, Args)),
> + ConstructorDecl(CD),
> + MemcpyableCtor(CD->isImplicitlyDefined() &&
> + CD->isCopyOrMoveConstructor() &&
> + CGF.getLangOpts().getGC() == LangOptions::NonGC),
> + Args(Args) { }
> +
> + void addMemberInitializer(CXXCtorInitializer *MemberInit) {
> + if (isMemberInitMemcpyable(MemberInit)) {
> + AggregatedInits.push_back(MemberInit);
> + addMemcpyableField(MemberInit->getMember());
> + } else {
> + emitAggregatedInits();
> + EmitMemberInitializer(CGF, ConstructorDecl->getParent(), MemberInit,
> + ConstructorDecl, Args);
> + }
> + }
> +
> + void emitAggregatedInits() {
> + if (AggregatedInits.size() <= 1) {
> + // This memcpy is too small to be worthwhile. Fall back on default
> + // codegen.
> + for (unsigned i = 0; i < AggregatedInits.size(); ++i) {
> + EmitMemberInitializer(CGF, ConstructorDecl->getParent(),
> + AggregatedInits[i], ConstructorDecl, Args);
> + }
> + reset();
> + return;
> + }
> +
> + pushEHDestructors();
> + emitMemcpy();
> + AggregatedInits.clear();
> + }
> +
> + void pushEHDestructors() {
> + llvm::Value *ThisPtr = CGF.LoadCXXThis();
> + QualType RecordTy = CGF.getContext().getTypeDeclType(ClassDecl);
> + LValue LHS = CGF.MakeNaturalAlignAddrLValue(ThisPtr, RecordTy);
> +
> + for (unsigned i = 0; i < AggregatedInits.size(); ++i) {
> + QualType FieldType = AggregatedInits[i]->getMember()->getType();
> + QualType::DestructionKind dtorKind = FieldType.isDestructedType();
> + if (CGF.needsEHCleanup(dtorKind))
> + CGF.pushEHDestroy(dtorKind, LHS.getAddress(), FieldType);
> + }
> + }
> +
> + void finish() {
> + emitAggregatedInits();
> + }
> +
> + private:
> + const CXXConstructorDecl *ConstructorDecl;
> + bool MemcpyableCtor;
> + FunctionArgList &Args;
> + SmallVector<CXXCtorInitializer*, 16> AggregatedInits;
> + };
> +
> + class AssignmentMemcpyizer : public FieldMemcpyizer {
> + private:
> +
> + // Returns the memcpyable field copied by the given statement, if one
> + // exists. Otherwise returns null.
> + FieldDecl* getMemcpyableField(Stmt *S) {
> + if (!AssignmentsMemcpyable)
> + return 0;
> + if (BinaryOperator *BO = dyn_cast<BinaryOperator>(S)) {
> + // Recognise trivial assignments.
> + if (BO->getOpcode() != BO_Assign)
> + return 0;
> + MemberExpr *ME = dyn_cast<MemberExpr>(BO->getLHS());
> + if (!ME)
> + return 0;
> + FieldDecl *Field = dyn_cast<FieldDecl>(ME->getMemberDecl());
> + if (!Field || !isMemcpyableField(Field))
> + return 0;
> + Stmt *RHS = BO->getRHS();
> + if (ImplicitCastExpr *EC = dyn_cast<ImplicitCastExpr>(RHS))
> + RHS = EC->getSubExpr();
> + if (!RHS)
> + return 0;
> + MemberExpr *ME2 = dyn_cast<MemberExpr>(RHS);
> + if (!ME2 || dyn_cast<FieldDecl>(ME2->getMemberDecl()) != Field)
> + return 0;
> + return Field;
> + } else if (CXXMemberCallExpr *MCE = dyn_cast<CXXMemberCallExpr>(S)) {
> + CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(MCE->getCalleeDecl());
> + if (!(MD && (MD->isCopyAssignmentOperator() ||
> + MD->isMoveAssignmentOperator()) &&
> + MD->isTrivial()))
> + return 0;
> + MemberExpr *IOA = dyn_cast<MemberExpr>(MCE->getImplicitObjectArgument());
> + if (!IOA)
> + return 0;
> + FieldDecl *Field = dyn_cast<FieldDecl>(IOA->getMemberDecl());
> + if (!Field || !isMemcpyableField(Field))
> + return 0;
> + MemberExpr *Arg0 = dyn_cast<MemberExpr>(MCE->getArg(0));
> + if (!Arg0 || Field != dyn_cast<FieldDecl>(Arg0->getMemberDecl()))
> + return 0;
> + return Field;
> + } else if (CallExpr *CE = dyn_cast<CallExpr>(S)) {
> + FunctionDecl *FD = dyn_cast<FunctionDecl>(CE->getCalleeDecl());
> + if (!FD || FD->getBuiltinID() != Builtin::BI__builtin_memcpy)
> + return 0;
> + Expr *DstPtr = CE->getArg(0);
> + if (ImplicitCastExpr *DC = dyn_cast<ImplicitCastExpr>(DstPtr))
> + DstPtr = DC->getSubExpr();
> + UnaryOperator *DUO = dyn_cast<UnaryOperator>(DstPtr);
> + if (!DUO || DUO->getOpcode() != UO_AddrOf)
> + return 0;
> + MemberExpr *ME = dyn_cast<MemberExpr>(DUO->getSubExpr());
> + if (!ME)
> + return 0;
> + FieldDecl *Field = dyn_cast<FieldDecl>(ME->getMemberDecl());
> + if (!Field || !isMemcpyableField(Field))
> + return 0;
> + Expr *SrcPtr = CE->getArg(1);
> + if (ImplicitCastExpr *SC = dyn_cast<ImplicitCastExpr>(SrcPtr))
> + SrcPtr = SC->getSubExpr();
> + UnaryOperator *SUO = dyn_cast<UnaryOperator>(SrcPtr);
> + if (!SUO || SUO->getOpcode() != UO_AddrOf)
> + return 0;
> + MemberExpr *ME2 = dyn_cast<MemberExpr>(SUO->getSubExpr());
> + if (!ME2 || Field != dyn_cast<FieldDecl>(ME2->getMemberDecl()))
> + return 0;
> + return Field;
> + }
> +
> + return 0;
> + }
> +
> + bool AssignmentsMemcpyable;
> + SmallVector<Stmt*, 16> AggregatedStmts;
> +
> + public:
> +
> + AssignmentMemcpyizer(CodeGenFunction &CGF, const CXXMethodDecl *AD,
> + FunctionArgList &Args)
> + : FieldMemcpyizer(CGF, AD->getParent(), Args[Args.size() - 1]),
> + AssignmentsMemcpyable(CGF.getLangOpts().getGC() == LangOptions::NonGC) {
> + assert(Args.size() == 2);
> + }
> +
> + void emitAssignment(Stmt *S) {
> + FieldDecl *F = getMemcpyableField(S);
> + if (F) {
> + addMemcpyableField(F);
> + AggregatedStmts.push_back(S);
> + } else {
> + emitAggregatedStmts();
> + CGF.EmitStmt(S);
> + }
> + }
> +
> + void emitAggregatedStmts() {
> + if (AggregatedStmts.size() <= 1) {
> + for (unsigned i = 0; i < AggregatedStmts.size(); ++i)
> + CGF.EmitStmt(AggregatedStmts[i]);
> + reset();
> + }
> +
> + emitMemcpy();
> + AggregatedStmts.clear();
> + }
> +
> + void finish() {
> + emitAggregatedStmts();
> + }
> + };
> +
> +}
> +
> /// EmitCtorPrologue - This routine generates necessary code to initialize
> /// base classes and non-static data members belonging to this constructor.
> void CodeGenFunction::EmitCtorPrologue(const CXXConstructorDecl *CD,
> @@ -770,8 +1118,10 @@ void CodeGenFunction::EmitCtorPrologue(c
>
> InitializeVTablePointers(ClassDecl);
>
> + ConstructorMemcpyizer CM(*this, CD, Args);
> for (unsigned I = 0, E = MemberInitializers.size(); I != E; ++I)
> - EmitMemberInitializer(*this, ClassDecl, MemberInitializers[I], CD, Args);
> + CM.addMemberInitializer(MemberInitializers[I]);
> + CM.finish();
> }
>
> static bool
> @@ -940,6 +1290,24 @@ void CodeGenFunction::EmitDestructorBody
> ExitCXXTryStmt(*cast<CXXTryStmt>(Body), true);
> }
>
> +void CodeGenFunction::emitImplicitAssignmentOperatorBody(FunctionArgList &Args) {
> + const CXXMethodDecl *AssignOp = cast<CXXMethodDecl>(CurGD.getDecl());
> + const Stmt *RootS = AssignOp->getBody();
> + assert(isa<CompoundStmt>(RootS) &&
> + "Body of an implicit assignment operator should be compound
> stmt.");
> + const CompoundStmt *RootCS = cast<CompoundStmt>(RootS);
> +
> + LexicalScope Scope(*this, RootCS->getSourceRange());
> +
> + AssignmentMemcpyizer AM(*this, AssignOp, Args);
> + for (CompoundStmt::const_body_iterator I = RootCS->body_begin(),
> + E = RootCS->body_end();
> + I != E; ++I) {
> + AM.emitAssignment(*I);
> + }
> + AM.finish();
> +}
> +
> namespace {
> /// Call the operator delete associated with the current destructor.
> struct CallDtorDelete : EHScopeStack::Cleanup {
>
> Modified: cfe/trunk/lib/CodeGen/CodeGenFunction.cpp
> URL:
> http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/CodeGen/CodeGenFunction.cpp?rev=175389&r1=175388&r2=175389&view=diff
>
> ==============================================================================
> --- cfe/trunk/lib/CodeGen/CodeGenFunction.cpp (original)
> +++ cfe/trunk/lib/CodeGen/CodeGenFunction.cpp Sun Feb 17 01:22:09 2013
> @@ -560,6 +560,11 @@ void CodeGenFunction::GenerateCode(Globa
> // The lambda "__invoke" function is special, because it forwards or
> // clones the body of the function call operator (but is actually static).
> EmitLambdaStaticInvokeFunction(cast<CXXMethodDecl>(FD));
> + } else if (FD->isDefaulted() && isa<CXXMethodDecl>(FD) &&
> + cast<CXXMethodDecl>(FD)->isCopyAssignmentOperator()) {
> + // Implicit copy-assignment gets the same special treatment as implicit
> + // copy-constructors.
> + emitImplicitAssignmentOperatorBody(Args);
> }
> else
> EmitFunctionBody(Args);
>
> Modified: cfe/trunk/lib/CodeGen/CodeGenFunction.h
> URL:
> http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/CodeGen/CodeGenFunction.h?rev=175389&r1=175388&r2=175389&view=diff
>
> ==============================================================================
> --- cfe/trunk/lib/CodeGen/CodeGenFunction.h (original)
> +++ cfe/trunk/lib/CodeGen/CodeGenFunction.h Sun Feb 17 01:22:09 2013
> @@ -1401,6 +1401,7 @@ public:
>
> void EmitConstructorBody(FunctionArgList &Args);
> void EmitDestructorBody(FunctionArgList &Args);
> + void emitImplicitAssignmentOperatorBody(FunctionArgList &Args);
> void EmitFunctionBody(FunctionArgList &Args);
>
> void EmitForwardingCallToLambda(const CXXRecordDecl *Lambda,
>
> Modified: cfe/trunk/test/CodeGenCXX/copy-assign-synthesis-1.cpp
> URL:
> http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGenCXX/copy-assign-synthesis-1.cpp?rev=175389&r1=175388&r2=175389&view=diff
>
> ==============================================================================
> --- cfe/trunk/test/CodeGenCXX/copy-assign-synthesis-1.cpp (original)
> +++ cfe/trunk/test/CodeGenCXX/copy-assign-synthesis-1.cpp Sun Feb 17 01:22:09 2013
> @@ -96,14 +96,8 @@ int main() {
> // CHECK-LP64: .globl __ZN1XaSERKS_
> // CHECK-LP64: .weak_definition __ZN1XaSERKS_
> // CHECK-LP64: __ZN1XaSERKS_:
> -// CHECK-LP64: .globl __ZN1QaSERKS_
> -// CHECK-LP64: .weak_definition __ZN1QaSERKS_
> -// CHECK-LP64: __ZN1QaSERKS_:
>
> // CHECK-LP32: .globl __ZN1XaSERKS_
> // CHECK-LP32: .weak_definition __ZN1XaSERKS_
> // CHECK-LP32: __ZN1XaSERKS_:
> -// CHECK-LP32: .globl __ZN1QaSERKS_
> -// CHECK-LP32: .weak_definition __ZN1QaSERKS_
> -// CHECK-LP32: __ZN1QaSERKS_:
>
>
> Modified: cfe/trunk/test/CodeGenCXX/implicit-copy-assign-operator.cpp
> URL:
> http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGenCXX/implicit-copy-assign-operator.cpp?rev=175389&r1=175388&r2=175389&view=diff
>
> ==============================================================================
> --- cfe/trunk/test/CodeGenCXX/implicit-copy-assign-operator.cpp (original)
> +++ cfe/trunk/test/CodeGenCXX/implicit-copy-assign-operator.cpp Sun Feb 17 01:22:09 2013
> @@ -44,7 +44,7 @@ void test_D(D d1, D d2) {
> // CHECK: {{call.*_ZN1AaSERS_}}
> // CHECK: {{call.*_ZN1BaSERS_}}
> // CHECK: {{call.*_ZN1CaSERKS_}}
> -// CHECK: {{call void @llvm.memcpy.p0i8.p0i8.i64.*i64 24}}
> +// CHECK: {{call void @llvm.memcpy.p0i8.p0i8.i64.*i64 28}}
> // CHECK: {{call.*_ZN1BaSERS_}}
> // CHECK: br
> // CHECK: {{call.*_ZN1CaSERKS_}}
>
> Modified: cfe/trunk/test/CodeGenCXX/implicit-copy-constructor.cpp
> URL:
> http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGenCXX/implicit-copy-constructor.cpp?rev=175389&r1=175388&r2=175389&view=diff
>
> ==============================================================================
> --- cfe/trunk/test/CodeGenCXX/implicit-copy-constructor.cpp (original)
> +++ cfe/trunk/test/CodeGenCXX/implicit-copy-constructor.cpp Sun Feb 17 01:22:09 2013
> @@ -46,7 +46,7 @@ void f(D d) {
> // CHECK: call void @_ZN1AD1Ev
> // CHECK: call void @_ZN1AC2ERS_
> // CHECK: call void @_ZN1BC2ERS_
> -// CHECK: {{call void @llvm.memcpy.p0i8.p0i8.i64.*i64 24}}
> +// CHECK: {{call void @llvm.memcpy.p0i8.p0i8.i64.*i64 28}}
> // CHECK: call void @_ZN1BC1ERS_
> // CHECK: br
> // CHECK: {{icmp ult.*, 2}}
> @@ -54,8 +54,7 @@ void f(D d) {
> // CHECK: call void @_ZN1AC1Ev
> // CHECK: call void @_ZN1CC1ERS_1A
> // CHECK: call void @_ZN1AD1Ev
> -// CHECK: {{call void @llvm.memcpy.p0i8.p0i8.i64.*i64 288}}
> -// CHECK: {{call void @llvm.memcpy.p0i8.p0i8.i64.*i64 12}}
> +// CHECK: {{call void @llvm.memcpy.p0i8.p0i8.i64.*i64 300}}
> // CHECK: ret void
>
>
>
> Added: cfe/trunk/test/CodeGenCXX/pod-member-memcpys.cpp
> URL:
> http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGenCXX/pod-member-memcpys.cpp?rev=175389&view=auto
>
> ==============================================================================
> --- cfe/trunk/test/CodeGenCXX/pod-member-memcpys.cpp (added)
> +++ cfe/trunk/test/CodeGenCXX/pod-member-memcpys.cpp Sun Feb 17 01:22:09 2013
> @@ -0,0 +1,224 @@
> +// RUN: %clang_cc1 -triple x86_64-apple-darwin10 -emit-llvm -std=c++03 -fexceptions -fcxx-exceptions -O1 -o - %s | FileCheck %s
> +
> +struct POD {
> + int w, x, y, z;
> +};
> +
> +struct PODLike {
> + int w, x, y, z;
> + PODLike();
> + ~PODLike();
> +};
> +
> +struct NonPOD {
> + NonPOD();
> + NonPOD(const NonPOD&);
> + NonPOD& operator=(const NonPOD&);
> +};
> +
> +struct Basic {
> + int a, b, c, d;
> + NonPOD np;
> + int w, x, y, z;
> +};
> +
> +struct PODMember {
> + int a, b, c, d;
> + POD p;
> + NonPOD np;
> + int w, x, y, z;
> +};
> +
> +struct PODLikeMember {
> + int a, b, c, d;
> + PODLike pl;
> + NonPOD np;
> + int w, x, y, z;
> +};
> +
> +struct ArrayMember {
> + int a, b, c, d;
> + int e[12];
> + NonPOD np;
> + int f[12];
> + int w, x, y, z;
> +};
> +
> +struct VolatileMember {
> + int a, b, c, d;
> + volatile int v;
> + NonPOD np;
> + int w, x, y, z;
> +};
> +
> +struct BitfieldMember {
> + int a, b, c, d;
> + NonPOD np;
> + int w : 6;
> + int x : 6;
> + int y : 6;
> + int z : 6;
> +};
> +
> +struct InnerClassMember {
> + struct {
> + int a, b, c, d;
> + } a;
> + int b, c, d, e;
> + NonPOD np;
> + int w, x, y, z;
> +};
> +
> +struct ReferenceMember {
> + ReferenceMember(int &a, int &b, int &c, int &d)
> + : a(a), b(b), c(c), d(d) {}
> + int &a;
> + int &b;
> + NonPOD np;
> + int &c;
> + int &d;
> +};
> +
> +// COPY-ASSIGNMENT OPERATORS:
> +
> +// Assignment operators are output in the order they're encountered.
> +
> +#define CALL_AO(T) void callAO##T(T& a, const T& b) { a = b; }
> +
> +CALL_AO(Basic)
> +CALL_AO(PODMember)
> +CALL_AO(PODLikeMember)
> +CALL_AO(ArrayMember)
> +CALL_AO(VolatileMember)
> +CALL_AO(BitfieldMember)
> +CALL_AO(InnerClassMember)
> +
> +// Basic copy-assignment:
> +// CHECK: define linkonce_odr %struct.Basic* @_ZN5BasicaSERKS_(%struct.Basic* %this, %struct.Basic*)
> +// CHECK: tail call void @llvm.memcpy.p0i8.p0i8.i64({{.*}}i64 16, i32 4{{.*}})
> +// CHECK: tail call %struct.NonPOD* @_ZN6NonPODaSERKS_
> +// CHECK: tail call void @llvm.memcpy.p0i8.p0i8.i64({{.*}}i64 16, i32 4{{.*}})
> +// CHECK: ret %struct.Basic* %this
> +
> +// PODMember copy-assignment:
> +// CHECK: define linkonce_odr %struct.PODMember* @_ZN9PODMemberaSERKS_(%struct.PODMember* %this, %struct.PODMember*)
> +// CHECK: tail call void @llvm.memcpy.p0i8.p0i8.i64({{.*}}i64 32, i32 4{{.*}})
> +// CHECK: tail call %struct.NonPOD* @_ZN6NonPODaSERKS_
> +// CHECK: tail call void @llvm.memcpy.p0i8.p0i8.i64({{.*}}i64 16, i32 4{{.*}})
> +// CHECK: ret %struct.PODMember* %this
> +
> +// PODLikeMember copy-assignment:
> +// CHECK: define linkonce_odr %struct.PODLikeMember* @_ZN13PODLikeMemberaSERKS_(%struct.PODLikeMember* %this, %struct.PODLikeMember*)
> +// CHECK: tail call void @llvm.memcpy.p0i8.p0i8.i64({{.*}}i64 32, i32 4{{.*}})
> +// CHECK: tail call %struct.NonPOD* @_ZN6NonPODaSERKS_
> +// CHECK: tail call void @llvm.memcpy.p0i8.p0i8.i64({{.*}}i64 16, i32 4{{.*}})
> +// CHECK: ret %struct.PODLikeMember* %this
> +
> +// ArrayMember copy-assignment:
> +// CHECK: define linkonce_odr %struct.ArrayMember* @_ZN11ArrayMemberaSERKS_(%struct.ArrayMember* %this, %struct.ArrayMember*)
> +// CHECK: tail call void @llvm.memcpy.p0i8.p0i8.i64({{.*}}i64 64, i32 4{{.*}})
> +// CHECK: tail call %struct.NonPOD* @_ZN6NonPODaSERKS_
> +// CHECK: tail call void @llvm.memcpy.p0i8.p0i8.i64({{.*}}i64 64, i32 4{{.*}})
> +// CHECK: ret %struct.ArrayMember* %this
> +
> +// VolatileMember copy-assignment:
> +// CHECK: define linkonce_odr %struct.VolatileMember* @_ZN14VolatileMemberaSERKS_(%struct.VolatileMember* %this, %struct.VolatileMember*)
> +// CHECK: tail call void @llvm.memcpy.p0i8.p0i8.i64({{.*}}i64 16, i32 4{{.*}})
> +// CHECK: load volatile i32* {{.*}}, align 4
> +// CHECK: store volatile i32 {{.*}}, align 4
> +// CHECK: tail call %struct.NonPOD* @_ZN6NonPODaSERKS_
> +// CHECK: tail call void @llvm.memcpy.p0i8.p0i8.i64({{.*}}i64 16, i32 4{{.*}})
> +// CHECK: ret %struct.VolatileMember* %this
> +
> +// BitfieldMember copy-assignment:
> +// CHECK: define linkonce_odr %struct.BitfieldMember* @_ZN14BitfieldMemberaSERKS_(%struct.BitfieldMember* %this, %struct.BitfieldMember*)
> +// CHECK: tail call void @llvm.memcpy.p0i8.p0i8.i64({{.*}}i64 16, i32 4{{.*}})
> +// CHECK: tail call %struct.NonPOD* @_ZN6NonPODaSERKS_
> +// CHECK: tail call void @llvm.memcpy.p0i8.p0i8.i64({{.*}}i64 3, i32 1{{.*}})
> +// CHECK: ret %struct.BitfieldMember* %this
> +
> +// InnerClassMember copy-assignment:
> +// CHECK: define linkonce_odr %struct.InnerClassMember* @_ZN16InnerClassMemberaSERKS_(%struct.InnerClassMember* %this, %struct.InnerClassMember*)
> +// CHECK: tail call void @llvm.memcpy.p0i8.p0i8.i64({{.*}}i64 32, i32 4{{.*}})
> +// CHECK: tail call %struct.NonPOD* @_ZN6NonPODaSERKS_
> +// CHECK: tail call void @llvm.memcpy.p0i8.p0i8.i64({{.*}}i64 16, i32 4{{.*}})
> +// CHECK: ret %struct.InnerClassMember* %this
> +
> +// COPY-CONSTRUCTORS:
> +
> +// Clang outputs copy-constructors in the reverse of the order that
> +// copy-constructor calls are encountered. Add functions that call the copy
> +// constructors of the classes above in reverse order here.
> +
> +#define CALL_CC(T) T callCC##T(const T& b) { return b; }
> +
> +CALL_CC(ReferenceMember)
> +CALL_CC(InnerClassMember)
> +CALL_CC(BitfieldMember)
> +CALL_CC(VolatileMember)
> +CALL_CC(ArrayMember)
> +CALL_CC(PODLikeMember)
> +CALL_CC(PODMember)
> +CALL_CC(Basic)
> +
> +// Basic copy-constructor:
> +// CHECK: define linkonce_odr void @_ZN5BasicC2ERKS_(%struct.Basic* %this, %struct.Basic*)
> +// CHECK: tail call void @llvm.memcpy.p0i8.p0i8.i64({{.*}}i64 16, i32 4{{.*}})
> +// CHECK: tail call void @_ZN6NonPODC1ERKS_
> +// CHECK: tail call void @llvm.memcpy.p0i8.p0i8.i64({{.*}}i64 16, i32 4{{.*}})
> +// CHECK: ret void
> +
> +// PODMember copy-constructor:
> +// CHECK: define linkonce_odr void @_ZN9PODMemberC2ERKS_(%struct.PODMember* %this, %struct.PODMember*)
> +// CHECK: tail call void @llvm.memcpy.p0i8.p0i8.i64({{.*}}i64 32, i32 4{{.*}})
> +// CHECK: tail call void @_ZN6NonPODC1ERKS_
> +// CHECK: tail call void @llvm.memcpy.p0i8.p0i8.i64({{.*}}i64 16, i32 4{{.*}})
> +// CHECK: ret void
> +
> +// PODLikeMember copy-constructor:
> +// CHECK: define linkonce_odr void @_ZN13PODLikeMemberC2ERKS_(%struct.PODLikeMember* %this, %struct.PODLikeMember*)
> +// CHECK: tail call void @llvm.memcpy.p0i8.p0i8.i64({{.*}}i64 32, i32 4{{.*}})
> +// CHECK: invoke void @_ZN6NonPODC1ERKS_
> +// CHECK: invoke.cont:
> +// CHECK: tail call void @llvm.memcpy.p0i8.p0i8.i64({{.*}}i64 16, i32 4{{.*}})
> +// CHECK: ret void
> +// CHECK: lpad:
> +// CHECK: landingpad
> +// CHECK: invoke void @_ZN7PODLikeD1Ev
> +
> +// ArrayMember copy-constructor:
> +// CHECK: define linkonce_odr void @_ZN11ArrayMemberC2ERKS_(%struct.ArrayMember* %this, %struct.ArrayMember*)
> +// CHECK: tail call void @llvm.memcpy.p0i8.p0i8.i64({{.*}}i64 64, i32 4{{.*}})
> +// CHECK: tail call void @_ZN6NonPODC1ERKS_
> +// CHECK: tail call void @llvm.memcpy.p0i8.p0i8.i64({{.*}}i64 64, i32 4{{.*}})
> +// CHECK: ret void
> +
> +// VolatileMember copy-constructor:
> +// CHECK: define linkonce_odr void @_ZN14VolatileMemberC2ERKS_(%struct.VolatileMember* %this, %struct.VolatileMember*)
> +// CHECK: tail call void @llvm.memcpy.p0i8.p0i8.i64({{.*}}i64 16, i32 4{{.*}})
> +// CHECK: load volatile i32* {{.*}}, align 4
> +// CHECK: store volatile i32 {{.*}}, align 4
> +// CHECK: tail call void @_ZN6NonPODC1ERKS_
> +// CHECK: tail call void @llvm.memcpy.p0i8.p0i8.i64({{.*}}i64 16, i32 4{{.*}})
> +// CHECK: ret void
> +
> +// BitfieldMember copy-constructor:
> +// CHECK: define linkonce_odr void @_ZN14BitfieldMemberC2ERKS_(%struct.BitfieldMember* %this, %struct.BitfieldMember*)
> +// CHECK: tail call void @llvm.memcpy.p0i8.p0i8.i64({{.*}}i64 16, i32 4{{.*}})
> +// CHECK: tail call void @_ZN6NonPODC1ERKS_
> +// CHECK: tail call void @llvm.memcpy.p0i8.p0i8.i64({{.*}}i64 3, i32 1{{.*}})
> +// CHECK: ret void
> +
> +// InnerClassMember copy-constructor:
> +// CHECK: define linkonce_odr void @_ZN16InnerClassMemberC2ERKS_(%struct.InnerClassMember* %this, %struct.InnerClassMember*)
> +// CHECK: tail call void @llvm.memcpy.p0i8.p0i8.i64({{.*}}i64 32, i32 4{{.*}})
> +// CHECK: tail call void @_ZN6NonPODC1ERKS_
> +// CHECK: tail call void @llvm.memcpy.p0i8.p0i8.i64({{.*}}i64 16, i32 4{{.*}})
> +// CHECK: ret void
> +
> +// ReferenceMember copy-constructor:
> +// CHECK: define linkonce_odr void @_ZN15ReferenceMemberC2ERKS_(%struct.ReferenceMember* %this, %struct.ReferenceMember*)
> +// CHECK: tail call void @llvm.memcpy.p0i8.p0i8.i64({{.*}}i64 16, i32 8{{.*}})
> +// CHECK: tail call void @_ZN6NonPODC1ERKS_
> +// CHECK: tail call void @llvm.memcpy.p0i8.p0i8.i64({{.*}}i64 16, i32 8{{.*}})
> +// CHECK: ret void
>
>