[cfe-commits] r85905 - in /cfe/trunk: lib/CodeGen/CGCXX.cpp lib/CodeGen/CodeGenFunction.h test/CodeGenCXX/virt.cpp

Mike Stump mrs at apple.com
Tue Nov 3 08:59:27 PST 2009


Author: mrs
Date: Tue Nov  3 10:59:27 2009
New Revision: 85905

URL: http://llvm.org/viewvc/llvm-project?rev=85905&view=rev
Log:
Refine return value adjustments for thunks.
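
For context, the thunk in question is the one a vtable entry points at when a
virtual function is overridden with a covariant return type: the thunk adjusts
the incoming this pointer, calls the overrider, and then adjusts the returned
pointer back to the type the caller expects. A minimal sketch of such a
situation (not this commit's test case) is:

    struct A   { virtual A *clone() { return this; } };
    struct Pad { virtual int pad() { return 0; } };
    struct B : Pad, A { virtual B *clone() { return this; } };

Because Pad precedes A, the A subobject sits at a non-zero offset inside B, so
the clone() entry in A's vtable must point at a thunk that converts the
incoming A* to a B* before the call and converts the returned B* back to an A*
afterwards; that second conversion is the return value adjustment this commit
starts emitting.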

Modified:
    cfe/trunk/lib/CodeGen/CGCXX.cpp
    cfe/trunk/lib/CodeGen/CodeGenFunction.h
    cfe/trunk/test/CodeGenCXX/virt.cpp

Modified: cfe/trunk/lib/CodeGen/CGCXX.cpp
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/CodeGen/CGCXX.cpp?rev=85905&r1=85904&r2=85905&view=diff

==============================================================================
--- cfe/trunk/lib/CodeGen/CGCXX.cpp (original)
+++ cfe/trunk/lib/CodeGen/CGCXX.cpp Tue Nov  3 10:59:27 2009
@@ -705,6 +705,38 @@
   return GenerateCovariantThunk(Fn, MD, Extern, nv, v, 0, 0);
 }
 
+llvm::Value *CodeGenFunction::DynamicTypeAdjust(llvm::Value *V, int64_t nv,
+                                                int64_t v) {
+  llvm::Type *Ptr8Ty = llvm::PointerType::get(llvm::Type::getInt8Ty(VMContext),
+                                              0);
+  const llvm::Type *OrigTy = V->getType();
+  if (nv) {
+    // Do the non-virtual adjustment
+    V = Builder.CreateBitCast(V, Ptr8Ty);
+    V = Builder.CreateConstInBoundsGEP1_64(V, nv);
+    V = Builder.CreateBitCast(V, OrigTy);
+  }
+  if (v) {
+    // Do the virtual this adjustment
+    const llvm::Type *PtrDiffTy = 
+      ConvertType(getContext().getPointerDiffType());
+    llvm::Type *PtrPtr8Ty, *PtrPtrDiffTy;
+    PtrPtr8Ty = llvm::PointerType::get(Ptr8Ty, 0);
+    PtrPtrDiffTy = llvm::PointerType::get(PtrDiffTy, 0);
+    llvm::Value *ThisVal = Builder.CreateBitCast(V, Ptr8Ty);
+    V = Builder.CreateBitCast(V, PtrPtrDiffTy->getPointerTo());
+    V = Builder.CreateLoad(V, "vtable");
+    llvm::Value *VTablePtr = V;
+    assert(v % (LLVMPointerWidth/8) == 0 && "vtable entry unaligned");
+    v /= LLVMPointerWidth/8;
+    V = Builder.CreateConstInBoundsGEP1_64(VTablePtr, v);
+    V = Builder.CreateLoad(V);
+    V = Builder.CreateGEP(ThisVal, V);
+    V = Builder.CreateBitCast(V, OrigTy);
+  }
+  return V;
+}
+
 llvm::Constant *CodeGenFunction::GenerateCovariantThunk(llvm::Function *Fn,
                                                         const CXXMethodDecl *MD,
                                                         bool Extern,
@@ -746,33 +778,9 @@
 
   QualType ArgType = MD->getThisType(getContext());
   llvm::Value *Arg = Builder.CreateLoad(LocalDeclMap[ThisDecl], "this");
-  llvm::Type *Ptr8Ty = llvm::PointerType::get(llvm::Type::getInt8Ty(VMContext),
-                                              0);
-  const llvm::Type *OrigTy = Arg->getType();
-  if (nv_t) {
-    // Do the non-virtual this adjustment
-    Arg = Builder.CreateBitCast(Arg, Ptr8Ty);
-    Arg = Builder.CreateConstInBoundsGEP1_64(Arg, nv_t);
-    Arg = Builder.CreateBitCast(Arg, OrigTy);
-  }
-  if (v_t) {
-    // Do the virtual this adjustment
-    const llvm::Type *PtrDiffTy = 
-      ConvertType(getContext().getPointerDiffType());
-    llvm::Type *PtrPtr8Ty, *PtrPtrDiffTy;
-    PtrPtr8Ty = llvm::PointerType::get(Ptr8Ty, 0);
-    PtrPtrDiffTy = llvm::PointerType::get(PtrDiffTy, 0);
-    llvm::Value *ThisVal = Builder.CreateBitCast(Arg, Ptr8Ty);
-    Arg = Builder.CreateBitCast(Arg, PtrPtrDiffTy->getPointerTo());
-    Arg = Builder.CreateLoad(Arg, "vtable");
-    llvm::Value *VTablePtr = Arg;
-    assert(v_t % (LLVMPointerWidth/8) == 0 && "vtable entry unaligned");
-    v_t /= LLVMPointerWidth/8;
-    Arg = Builder.CreateConstInBoundsGEP1_64(VTablePtr, v_t);
-    Arg = Builder.CreateLoad(Arg);
-    Arg = Builder.CreateGEP(ThisVal, Arg);
-    Arg = Builder.CreateBitCast(Arg, OrigTy);
-  }
+  if (nv_t || v_t)
+    // Do the this adjustment.
+    Arg = DynamicTypeAdjust(Arg, nv_t, v_t);
   CallArgs.push_back(std::make_pair(RValue::get(Arg), ArgType));
 
   for (FunctionDecl::param_const_iterator i = MD->param_begin(),
@@ -790,7 +798,8 @@
   RValue RV = EmitCall(CGM.getTypes().getFunctionInfo(ResultType, CallArgs),
                        Callee, CallArgs, MD);
   if (nv_r || v_r) {
-    // FIXME: Add return value adjustments.
+    // Do the return result adjustment.
+    RV = RValue::get(DynamicTypeAdjust(RV.getScalarVal(), nv_r, v_r));
   }
 
   if (!ResultType->isVoidType())
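
Restated as ordinary C++ for illustration, the adjustment that the new
DynamicTypeAdjust helper emits IR for corresponds roughly to the following
hand-written analogue (dynamic_type_adjust is a hypothetical stand-in name;
nv is the constant byte offset and v is the byte offset of the ptrdiff_t slot
relative to the object's vtable pointer, as in the patch):

    #include <cstddef>
    #include <cstdint>

    // Illustrative analogue only, not code from the patch.
    static void *dynamic_type_adjust(void *p, std::int64_t nv, std::int64_t v) {
      char *addr = static_cast<char *>(p);
      if (nv)
        addr += nv;  // non-virtual part: fixed byte offset
      if (v) {
        // Virtual part: the vtable slot v bytes from the vtable pointer holds
        // a ptrdiff_t that is added to the (already nv-adjusted) address.
        // Assumes v is a multiple of the pointer size, as asserted in the patch.
        std::ptrdiff_t *vtable = *reinterpret_cast<std::ptrdiff_t **>(addr);
        addr += vtable[v / static_cast<std::int64_t>(sizeof(void *))];
      }
      return addr;
    }

GenerateCovariantThunk now routes the incoming this pointer through this
helper with (nv_t, v_t) and, with this commit, the returned value through it
with (nv_r, v_r), replacing the earlier FIXME.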

Modified: cfe/trunk/lib/CodeGen/CodeGenFunction.h
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/CodeGen/CodeGenFunction.h?rev=85905&r1=85904&r2=85905&view=diff

==============================================================================
--- cfe/trunk/lib/CodeGen/CodeGenFunction.h (original)
+++ cfe/trunk/lib/CodeGen/CodeGenFunction.h Tue Nov  3 10:59:27 2009
@@ -387,6 +387,11 @@
   /// GenerateVtable - Generate the vtable for the given type.
   llvm::Value *GenerateVtable(const CXXRecordDecl *RD);
 
+  /// DynamicTypeAdjust - Do the non-virtual and virtual adjustments on an
+  /// object pointer to alter the dynamic type of the pointer.  Used by
+  /// GenerateCovariantThunk for building thunks.
+  llvm::Value *DynamicTypeAdjust(llvm::Value *V, int64_t nv, int64_t v);
+
   /// GenerateThunk - Generate a thunk for the given method
   llvm::Constant *GenerateThunk(llvm::Function *Fn, const CXXMethodDecl *MD,
                                 bool Extern, int64_t nv, int64_t v);

Modified: cfe/trunk/test/CodeGenCXX/virt.cpp
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGenCXX/virt.cpp?rev=85905&r1=85904&r2=85905&view=diff

==============================================================================
--- cfe/trunk/test/CodeGenCXX/virt.cpp (original)
+++ cfe/trunk/test/CodeGenCXX/virt.cpp Tue Nov  3 10:59:27 2009
@@ -91,6 +91,36 @@
 // CHECK-LP64: movl $1, 12(%rax)
 // CHECK-LP64: movl $2, 8(%rax)
 
+// FIXME: This is the wrong thunk, but until these issues are fixed, better
+// than nothing.
+// CHECK-LP64:     __ZTcvn16_n72_v16_n32_N8test16_D4foo1Ev27:
+// CHECK-LP64-NEXT:Leh_func_begin33:
+// CHECK-LP64-NEXT:    subq    $24, %rsp
+// CHECK-LP64-NEXT:Llabel33:
+// CHECK-LP64-NEXT:    movq    %rdi, %rax
+// CHECK-LP64-NEXT:    movq    %rax, 8(%rsp)
+// CHECK-LP64-NEXT:    movq    8(%rsp), %rax
+// CHECK-LP64-NEXT:    movq    %rax, %rcx
+// CHECK-LP64-NEXT:    movabsq $-16, %rdx
+// CHECK-LP64-NEXT:    addq    %rdx, %rcx
+// CHECK-LP64-NEXT:    movq    -16(%rax), %rax
+// CHECK-LP64-NEXT:    movq    -72(%rax), %rax
+// CHECK-LP64-NEXT:    addq    %rax, %rcx
+// CHECK-LP64-NEXT:    movq    %rcx, %rax
+// CHECK-LP64-NEXT:    movq    %rax, %rdi
+// CHECK-LP64-NEXT:    call    __ZN8test16_D4foo1Ev
+// CHECK-LP64-NEXT:    movq    %rax, %rcx
+// CHECK-LP64-NEXT:    movabsq $16, %rdx
+// CHECK-LP64-NEXT:    addq    %rdx, %rcx
+// CHECK-LP64-NEXT:    movq    16(%rax), %rax
+// CHECK-LP64-NEXT:    movq    -32(%rax), %rax
+// CHECK-LP64-NEXT:    addq    %rax, %rcx
+// CHECK-LP64-NEXT:    movq    %rcx, %rax
+// CHECK-LP64-NEXT:    movq    %rax, 16(%rsp)
+// CHECK-LP64-NEXT:    movq    16(%rsp), %rax
+// CHECK-LP64-NEXT:    addq    $24, %rsp
+// CHECK-LP64-NEXT:    ret
+
 struct test12_A {
   virtual void foo0() { }
   virtual void foo();

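The symbol being checked is a covariant thunk whose mangling (per the Itanium
ABI call-offset encoding) says both adjustments are virtual: the this
adjustment has non-virtual delta -16 with a virtual offset at vtable offset
-72, and the return adjustment has non-virtual delta 16 with a virtual offset
at -32. A hypothetical hierarchy of that general shape (not the actual test16
classes in this file) would be:

    // B1 is a virtual base, so calling foo1 through B1's vtable slot needs a
    // virtual this adjustment, and converting the covariant D* result back to
    // B1* needs a virtual return adjustment as well.
    struct B1                  { virtual B1 *foo1() { return this; } };
    struct Pad                 { virtual int pad() { return 0; } };
    struct D : Pad, virtual B1 { virtual D *foo1() { return this; } };

The checked assembly then walks through the two DynamicTypeAdjust expansions
in order: add the constant delta, load the vtable, load the ptrdiff_t entry,
add it to the pointer, call the overrider, and repeat the same steps on the
returned value.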