[llvm] [llvm] Ensure propagated constants in the vtable are aligned (PR #136630)

via llvm-commits llvm-commits at lists.llvm.org
Mon Apr 21 15:50:27 PDT 2025


https://github.com/PiJoules created https://github.com/llvm/llvm-project/pull/136630

It's possible for virtual constant propagation in whole program devirtualization to create unaligned loads. We originally saw this with 4-byte aligned relative vtables where we could store 8-byte values before/after the vtable. But since the vtable is 4-byte aligned and we unconditionally do an 8-byte load, we can't guarantee that the stored constant will always be aligned to 8 bytes. We can also see this with normal vtables whenever a 1-byte char is stored in the vtable because the offset calculation for the GEP doesn't take into account the original vtable alignment.
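
As a concrete (illustrative) instance: a relative vtable at address 0x1004 is 4-byte aligned but not 8-byte aligned, so an i64 constant stored in the 8 bytes before it lands at 0xffc, and 0xffc mod 8 == 4, making the unconditional 8-byte load misaligned.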

This patch introduces two changes to virtual constant propagation:
1. Do not propagate constants whose preferred alignment is larger than the vtable alignment. This is required because, when a constant is stored in the vtable, we can only guarantee it is stored at an address aligned to at most the vtable's own alignment.
2. Round up the offset used in the GEP before the load so the load address is suitably aligned for the type being loaded (see the sketch after this list).
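
A minimal standalone sketch of the rounding rule (not part of the patch; the file and the lowercase helper name are illustrative, using the same arithmetic as the RoundUp helper the patch adds, with values mirroring the updated findLowestOffset unit test where the lowest 32-bit slot after the vtable moves from bit offset 40 to 64):

    #include <cassert>
    #include <cstdint>

    // Round Size up to the next multiple of Align; the same arithmetic as
    // the RoundUp helper added to WholeProgramDevirt.cpp.
    static uint64_t roundUpTo(uint64_t Size, uint64_t Align) {
      return (Size + (Align - 1)) / Align * Align;
    }

    int main() {
      // A 32-bit constant whose first free slot is at bit offset 40 is
      // pushed out to bit offset 64 so the load stays 32-bit aligned.
      assert(roundUpTo(40, 32) == 64);
      // Already-aligned offsets are unchanged.
      assert(roundUpTo(64, 32) == 64);
      return 0;
    }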

This patch updates tests to reflect this alignment change and adds some cases for relative vtables. 

From a69c770061855af2a2347fa54572534b29538847 Mon Sep 17 00:00:00 2001
From: Leonard Chan <leonardchan at google.com>
Date: Mon, 21 Apr 2025 15:33:40 -0700
Subject: [PATCH] [llvm] Ensure propagated constants in the vtable are aligned

---
 .../lib/Transforms/IPO/WholeProgramDevirt.cpp |  44 ++-
 .../virtual-const-prop-begin.ll               | 125 ++++++--
 .../virtual-const-prop-check.ll               |  91 +++++-
 .../virtual-const-prop-end.ll                 |  93 +++++-
 .../virtual-const-prop-small-alignment-32.ll  | 291 +++++++++++++++++
 .../virtual-const-prop-small-alignment-64.ll  | 292 ++++++++++++++++++
 .../Transforms/IPO/WholeProgramDevirt.cpp     |   2 +-
 7 files changed, 882 insertions(+), 56 deletions(-)
 create mode 100644 llvm/test/Transforms/WholeProgramDevirt/virtual-const-prop-small-alignment-32.ll
 create mode 100644 llvm/test/Transforms/WholeProgramDevirt/virtual-const-prop-small-alignment-64.ll

diff --git a/llvm/lib/Transforms/IPO/WholeProgramDevirt.cpp b/llvm/lib/Transforms/IPO/WholeProgramDevirt.cpp
index 0680b2c558807..b9f5e5d893c9a 100644
--- a/llvm/lib/Transforms/IPO/WholeProgramDevirt.cpp
+++ b/llvm/lib/Transforms/IPO/WholeProgramDevirt.cpp
@@ -226,6 +226,10 @@ struct PatternList {
     return false;
   }
 };
+
+inline uint64_t RoundUp(uint64_t Size, uint64_t Align) {
+  return (Size + (Align - 1)) / Align * Align;
+}
 } // namespace
 
 // Find the minimum offset that we may store a value of size Size bits at. If
@@ -298,7 +302,9 @@ wholeprogramdevirt::findLowestOffset(ArrayRef<VirtualCallTarget> Targets,
           ++Byte;
         }
       }
-      return (MinByte + I) * 8;
+      // Rounding up ensures the constant is always stored at an address we
+      // can directly load from without misalignment.
+      return RoundUp((MinByte + I) * 8, Size);
     NextI:;
     }
   }
@@ -1834,9 +1840,19 @@ bool DevirtModule::tryVirtualConstProp(
   if (!RetType)
     return false;
   unsigned BitWidth = RetType->getBitWidth();
+
+  // TODO: Since we can evaluate these constants at compile-time, we can save
+  // some space by calculating the smallest range of values that all these
+  // constants can fit in, then only allocate enough space to fit those values.
+  // At each callsite, we can get the original type by doing a sign/zero
+  // extension. For example, if we would store an i64, but we can see that all
+  // the values fit into an i16, then we can store an i16 before/after the
+  // vtable and at each callsite do a s/zext.
   if (BitWidth > 64)
     return false;
 
+  Align TypeAlignment = M.getDataLayout().getPrefTypeAlign(RetType);
+
   // Make sure that each function is defined, does not access memory, takes at
   // least one argument, does not use its first argument (which we assume is
   // 'this'), and has the same return type.
@@ -1861,6 +1877,18 @@ bool DevirtModule::tryVirtualConstProp(
         Fn->arg_empty() || !Fn->arg_begin()->use_empty() ||
         Fn->getReturnType() != RetType)
       return false;
+
+    // This only works if the integer size is at most the alignment of the
+    // vtable. If the table is underaligned, then we can't guarantee that the
+    // constant will always be aligned to the integer type alignment. For
+    // example, if the table is `align 1`, we can never guarantee that an i32
+    // stored before/after the vtable is 32-bit aligned without changing the
+    // alignment of the new global.
+    GlobalVariable *GV = Target.TM->Bits->GV;
+    Align TableAlignment = M.getDataLayout().getValueOrABITypeAlignment(
+        GV->getAlign(), GV->getValueType());
+    if (TypeAlignment > TableAlignment)
+      return false;
   }
 
   for (auto &&CSByConstantArg : SlotInfo.ConstCSInfo) {
@@ -1880,6 +1908,12 @@ bool DevirtModule::tryVirtualConstProp(
 
     // Find an allocation offset in bits in all vtables associated with the
     // type.
+    // TODO: If a series of i1s would be placed after the vtable, it would
+    // help save some space if they were placed at the very end after all
+    // other larger-size constants. Having these i1s anywhere in the middle
+    // of the allocation would require extra padding for any subsequent
+    // constants, whereas placing them at the very end would require no
+    // padding after them.
     uint64_t AllocBefore =
         findLowestOffset(TargetsForSlot, /*IsAfter=*/false, BitWidth);
     uint64_t AllocAfter =
@@ -1911,6 +1945,14 @@ bool DevirtModule::tryVirtualConstProp(
       setAfterReturnValues(TargetsForSlot, AllocAfter, BitWidth, OffsetByte,
                            OffsetBit);
 
+    // In an earlier check we forbade constant propagation from operating on
+    // tables whose alignment is less than the alignment needed for loading
+    // the constant. Thus, the address we take the offset from will always be
+    // aligned to at least this integer alignment. Now, we need to ensure that
+    // the offset is also aligned to this integer alignment to ensure we always
+    // have an aligned load.
+    assert(OffsetByte % TypeAlignment.value() == 0);
+
     if (RemarksEnabled || AreStatisticsEnabled())
       for (auto &&Target : TargetsForSlot)
         Target.WasDevirt = true;
diff --git a/llvm/test/Transforms/WholeProgramDevirt/virtual-const-prop-begin.ll b/llvm/test/Transforms/WholeProgramDevirt/virtual-const-prop-begin.ll
index 11273804e2081..3ed8027522788 100644
--- a/llvm/test/Transforms/WholeProgramDevirt/virtual-const-prop-begin.ll
+++ b/llvm/test/Transforms/WholeProgramDevirt/virtual-const-prop-begin.ll
@@ -1,34 +1,35 @@
 ; RUN: opt -S -passes=wholeprogramdevirt -whole-program-visibility %s | FileCheck %s
 
 target datalayout = "e-p:64:64"
-target triple = "x86_64-unknown-linux-gnu"
 
-; CHECK: [[VT1DATA:@[^ ]*]] = private constant { [8 x i8], [3 x ptr], [0 x i8] } { [8 x i8] c"\00\00\00\01\00\00\00\02", [3 x ptr] [ptr @vf0i1, ptr @vf1i1, ptr @vf1i32], [0 x i8] zeroinitializer }, section "vt1sec", !type [[T8:![0-9]+]]
+;; Note that i16 is used here such that we can ensure all constants for "typeid"
+;; can come before the vtable.
+; CHECK: [[VT1DATA:@[^ ]*]] = private constant { [8 x i8], [3 x ptr], [0 x i8] } { [8 x i8] c"\00\00\00\00\03\00\00\02", [3 x ptr] [ptr @vf0i1, ptr @vf1i1, ptr @vf1i16], [0 x i8] zeroinitializer }, section "vt1sec", !type [[T8:![0-9]+]]
 @vt1 = constant [3 x ptr] [
 ptr @vf0i1,
 ptr @vf1i1,
-ptr @vf1i32
+ptr @vf1i16
 ], section "vt1sec", !type !0
 
-; CHECK: [[VT2DATA:@[^ ]*]] = private constant { [8 x i8], [3 x ptr], [0 x i8] } { [8 x i8] c"\00\00\00\02\00\00\00\01", [3 x ptr] [ptr @vf1i1, ptr @vf0i1, ptr @vf2i32], [0 x i8] zeroinitializer }, !type [[T8]]
+; CHECK: [[VT2DATA:@[^ ]*]] = private constant { [8 x i8], [3 x ptr], [0 x i8] } { [8 x i8] c"\00\00\00\00\04\00\00\01", [3 x ptr] [ptr @vf1i1, ptr @vf0i1, ptr @vf2i16], [0 x i8] zeroinitializer }, !type [[T8]]
 @vt2 = constant [3 x ptr] [
 ptr @vf1i1,
 ptr @vf0i1,
-ptr @vf2i32
+ptr @vf2i16
 ], !type !0
 
-; CHECK: [[VT3DATA:@[^ ]*]] = private constant { [5 x i8], [3 x ptr], [0 x i8] } { [5 x i8] c"\03\00\00\00\02", [3 x ptr] [ptr @vf0i1, ptr @vf1i1, ptr @vf3i32], [0 x i8] zeroinitializer }, align 1, !type [[T5:![0-9]+]]
+; CHECK: [[VT3DATA:@[^ ]*]] = private constant { [4 x i8], [3 x ptr], [0 x i8] } { [4 x i8] c"\05\00\00\02", [3 x ptr] [ptr @vf0i1, ptr @vf1i1, ptr @vf3i16], [0 x i8] zeroinitializer }, align 2, !type [[T5:![0-9]+]]
 @vt3 = constant [3 x ptr] [
 ptr @vf0i1,
 ptr @vf1i1,
-ptr @vf3i32
-], align 1, !type !0
+ptr @vf3i16
+], align 2, !type !0
 
-; CHECK: [[VT4DATA:@[^ ]*]] = private constant { [16 x i8], [3 x ptr], [0 x i8] } { [16 x i8] c"\00\00\00\00\00\00\00\00\00\00\00\04\00\00\00\01", [3 x ptr] [ptr @vf1i1, ptr @vf0i1, ptr @vf4i32], [0 x i8] zeroinitializer },  align 16, !type [[T16:![0-9]+]]
+; CHECK: [[VT4DATA:@[^ ]*]] = private constant { [16 x i8], [3 x ptr], [0 x i8] } { [16 x i8] c"\00\00\00\00\00\00\00\00\00\00\00\00\06\00\00\01", [3 x ptr] [ptr @vf1i1, ptr @vf0i1, ptr @vf4i16], [0 x i8] zeroinitializer },  align 16, !type [[T16:![0-9]+]]
 @vt4 = constant [3 x ptr] [
 ptr @vf1i1,
 ptr @vf0i1,
-ptr @vf4i32
+ptr @vf4i16
 ], align 16, !type !0
 
 ; CHECK: @vt5 = {{.*}}, !type [[T0:![0-9]+]]
@@ -38,10 +39,35 @@ ptr @__cxa_pure_virtual,
 ptr @__cxa_pure_virtual
 ], !type !0
 
+;; Test relative vtables
+; CHECK:      [[VT6RELDATA:@[^ ]*]] = private constant { [4 x i8], [3 x i32], [0 x i8] } { [4 x i8] c"\00\00\03\00", [3 x i32] [
+; CHECK-SAME:     i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf0i1 to i64), i64 ptrtoint (ptr @vt6_rel to i64)) to i32),
+; CHECK-SAME:     i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf1i1 to i64), i64 ptrtoint (ptr @vt6_rel to i64)) to i32),
+; CHECK-SAME:     i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf1i16 to i64), i64 ptrtoint (ptr @vt6_rel to i64)) to i32)
+; CHECK-SAME: ], [0 x i8] zeroinitializer }, !type [[TREL:![0-9]+]]
+@vt6_rel = constant [3 x i32] [
+i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf0i1 to i64), i64 ptrtoint (ptr @vt6_rel to i64)) to i32),
+i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf1i1 to i64), i64 ptrtoint (ptr @vt6_rel to i64)) to i32),
+i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf1i16 to i64), i64 ptrtoint (ptr @vt6_rel to i64)) to i32)
+], !type !2
+
+; CHECK:      [[VT7RELDATA:@[^ ]*]] = private constant { [4 x i8], [3 x i32], [0 x i8] } { [4 x i8] c"\00\00\04\00", [3 x i32] [
+; CHECK-SAME:     i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf1i1 to i64), i64 ptrtoint (ptr @vt7_rel to i64)) to i32),
+; CHECK-SAME:     i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf0i1 to i64), i64 ptrtoint (ptr @vt7_rel to i64)) to i32),
+; CHECK-SAME:     i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf2i16 to i64), i64 ptrtoint (ptr @vt7_rel to i64)) to i32)
+; CHECK-SAME: ], [0 x i8] zeroinitializer }, !type [[TREL]]
+@vt7_rel = constant [3 x i32] [
+i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf1i1 to i64), i64 ptrtoint (ptr @vt7_rel to i64)) to i32),
+i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf0i1 to i64), i64 ptrtoint (ptr @vt7_rel to i64)) to i32),
+i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf2i16 to i64), i64 ptrtoint (ptr @vt7_rel to i64)) to i32)
+], !type !2
+
 ; CHECK: @vt1 = alias [3 x ptr], getelementptr inbounds ({ [8 x i8], [3 x ptr], [0 x i8] }, ptr [[VT1DATA]], i32 0, i32 1)
 ; CHECK: @vt2 = alias [3 x ptr], getelementptr inbounds ({ [8 x i8], [3 x ptr], [0 x i8] }, ptr [[VT2DATA]], i32 0, i32 1)
-; CHECK: @vt3 = alias [3 x ptr], getelementptr inbounds ({ [5 x i8], [3 x ptr], [0 x i8] }, ptr [[VT3DATA]], i32 0, i32 1)
+; CHECK: @vt3 = alias [3 x ptr], getelementptr inbounds ({ [4 x i8], [3 x ptr], [0 x i8] }, ptr [[VT3DATA]], i32 0, i32 1)
 ; CHECK: @vt4 = alias [3 x ptr], getelementptr inbounds ({ [16 x i8], [3 x ptr], [0 x i8] }, ptr [[VT4DATA]], i32 0, i32 1)
+; CHECK: @vt6_rel = alias [3 x i32], getelementptr inbounds ({ [4 x i8], [3 x i32], [0 x i8] }, ptr [[VT6RELDATA]], i32 0, i32 1)
+; CHECK: @vt7_rel = alias [3 x i32], getelementptr inbounds ({ [4 x i8], [3 x i32], [0 x i8] }, ptr [[VT7RELDATA]], i32 0, i32 1)
 
 define i1 @vf0i1(ptr %this) readnone {
   ret i1 0
@@ -51,20 +77,20 @@ define i1 @vf1i1(ptr %this) readnone {
   ret i1 1
 }
 
-define i32 @vf1i32(ptr %this) readnone {
-  ret i32 1
+define i16 @vf1i16(ptr %this) readnone {
+  ret i16 3
 }
 
-define i32 @vf2i32(ptr %this) readnone {
-  ret i32 2
+define i16 @vf2i16(ptr %this) readnone {
+  ret i16 4
 }
 
-define i32 @vf3i32(ptr %this) readnone {
-  ret i32 3
+define i16 @vf3i16(ptr %this) readnone {
+  ret i16 5
 }
 
-define i32 @vf4i32(ptr %this) readnone {
-  ret i32 4
+define i16 @vf4i16(ptr %this) readnone {
+  ret i16 6
 }
 
 ; CHECK: define i1 @call1(
@@ -87,7 +113,7 @@ define i1 @call2(ptr %obj) {
   %vtable = load ptr, ptr %obj
   %p = call i1 @llvm.type.test(ptr %vtable, metadata !"typeid")
   call void @llvm.assume(i1 %p)
-  %fptrptr = getelementptr [3 x ptr], ptr %vtable, i32 0, i32 1
+  %fptrptr = getelementptr [3 x ptr], ptr %vtable, i16 0, i16 1
   %fptr = load ptr, ptr %fptrptr
   ; CHECK: [[VTGEP2:%[^ ]*]] = getelementptr i8, ptr %vtable, i32 -1
   ; CHECK: [[VTLOAD2:%[^ ]*]] = load i8, ptr [[VTGEP2]]
@@ -98,27 +124,68 @@ define i1 @call2(ptr %obj) {
   ret i1 %result
 }
 
-; CHECK: define i32 @call3(
-define i32 @call3(ptr %obj) {
+; CHECK: define i16 @call3(
+define i16 @call3(ptr %obj) {
   %vtable = load ptr, ptr %obj
   %p = call i1 @llvm.type.test(ptr %vtable, metadata !"typeid")
   call void @llvm.assume(i1 %p)
-  %fptrptr = getelementptr [3 x ptr], ptr %vtable, i32 0, i32 2
+  %fptrptr = getelementptr [3 x ptr], ptr %vtable, i16 0, i16 2
   %fptr = load ptr, ptr %fptrptr
-  ; CHECK: [[VTGEP3:%[^ ]*]] = getelementptr i8, ptr %vtable, i32 -5
-  ; CHECK: [[VTLOAD3:%[^ ]*]] = load i32, ptr [[VTGEP3]]
-  %result = call i32 %fptr(ptr %obj)
-  ; CHECK: ret i32 [[VTLOAD3]]
-  ret i32 %result
+  ; CHECK: [[VTGEP3:%[^ ]*]] = getelementptr i8, ptr %vtable, i32 -4
+  ; CHECK: [[VTLOAD3:%[^ ]*]] = load i16, ptr [[VTGEP3]]
+  %result = call i16 %fptr(ptr %obj)
+  ; CHECK: ret i16 [[VTLOAD3]]
+  ret i16 %result
+}
+
+; CHECK: define i1 @call1_rel(
+define i1 @call1_rel(ptr %obj) {
+  %vtable = load ptr, ptr %obj
+  %p = call i1 @llvm.type.test(ptr %vtable, metadata !"typeid3")
+  call void @llvm.assume(i1 %p)
+  %fptr = call ptr @llvm.load.relative.i32(ptr %vtable, i32 0)
+  %result = call i1 %fptr(ptr %obj)
+  ret i1 %result
+  ; CHECK: [[RES:%[^ ]*]] = icmp eq ptr %vtable, @vt7_rel
+  ; CHECK: ret i1 [[RES]]
+}
+
+; CHECK: define i1 @call2_rel(
+define i1 @call2_rel(ptr %obj) {
+  %vtable = load ptr, ptr %obj
+  %p = call i1 @llvm.type.test(ptr %vtable, metadata !"typeid3")
+  call void @llvm.assume(i1 %p)
+  %fptr = call ptr @llvm.load.relative.i32(ptr %vtable, i32 4)
+  %result = call i1 %fptr(ptr %obj)
+  ret i1 %result
+  ; CHECK: [[RES:%[^ ]*]] = icmp eq ptr %vtable, @vt6_rel
+  ; CHECK: ret i1 [[RES]]
+}
+
+; CHECK: define i16 @call3_rel(
+define i16 @call3_rel(ptr %obj) {
+  %vtable = load ptr, ptr %obj
+  %p = call i1 @llvm.type.test(ptr %vtable, metadata !"typeid3")
+  call void @llvm.assume(i1 %p)
+  %fptr = call ptr @llvm.load.relative.i32(ptr %vtable, i32 8)
+  ; CHECK: [[VTGEP3:%[^ ]*]] = getelementptr i8, ptr %vtable, i32 -2
+  ; CHECK: [[VTLOAD3:%[^ ]*]] = load i16, ptr [[VTGEP3]]
+  %result = call i16 %fptr(ptr %obj)
+  ; CHECK: ret i16 [[VTLOAD3]]
+  ret i16 %result
 }
 
 declare i1 @llvm.type.test(ptr, metadata)
 declare void @llvm.assume(i1)
 declare void @__cxa_pure_virtual()
+declare ptr @llvm.load.relative.i32(ptr, i32)
 
 ; CHECK: [[T8]] = !{i32 8, !"typeid"}
-; CHECK: [[T5]] = !{i32 5, !"typeid"}
+; CHECK: [[T5]] = !{i32 4, !"typeid"}
 ; CHECK: [[T16]] = !{i32 16, !"typeid"}
 ; CHECK: [[T0]] = !{i32 0, !"typeid"}
+; CHECK: [[TREL]] = !{i32 4, !"typeid3"}
 
 !0 = !{i32 0, !"typeid"}
+!1 = !{i32 0, !"typeid2"}
+!2 = !{i32 0, !"typeid3"}
diff --git a/llvm/test/Transforms/WholeProgramDevirt/virtual-const-prop-check.ll b/llvm/test/Transforms/WholeProgramDevirt/virtual-const-prop-check.ll
index fb8b6de003fdd..d8f5c912e9a50 100644
--- a/llvm/test/Transforms/WholeProgramDevirt/virtual-const-prop-check.ll
+++ b/llvm/test/Transforms/WholeProgramDevirt/virtual-const-prop-check.ll
@@ -7,13 +7,16 @@
 ; RUN: opt -S -passes=wholeprogramdevirt -whole-program-visibility -pass-remarks=wholeprogramdevirt -wholeprogramdevirt-skip=vf0i1 %s 2>&1 | FileCheck %s --check-prefix=SKIP
 ; We have two set of call targets {vf0i1, vf1i1} and {vf1i32, vf2i32, vf3i32, vf4i32}.
 ; The command below prevents both of them from devirtualization.
-; RUN: opt -S -passes=wholeprogramdevirt -whole-program-visibility -pass-remarks=wholeprogramdevirt -wholeprogramdevirt-skip=vf0i1,vf1i32 %s 2>&1 | FileCheck %s --check-prefix=SKIP-ALL
+; RUN: opt -S -passes=wholeprogramdevirt -whole-program-visibility -pass-remarks=wholeprogramdevirt -wholeprogramdevirt-skip=vf0i1,vf1i32,vf3i32 %s 2>&1 | FileCheck %s --check-prefix=SKIP-ALL
 ; Check wildcard
 ; RUN: opt -S -passes=wholeprogramdevirt -whole-program-visibility -pass-remarks=wholeprogramdevirt -wholeprogramdevirt-skip=vf?i1 %s 2>&1 | FileCheck %s --check-prefix=SKIP
 
 target datalayout = "e-p:64:64"
 target triple = "x86_64-unknown-linux-gnu"
 
+; CHECK: remark: <unknown>:0:0: unique-ret-val: devirtualized a call to vf0i1
+; CHECK: remark: <unknown>:0:0: unique-ret-val: devirtualized a call to vf1i1
+; CHECK: remark: <unknown>:0:0: virtual-const-prop: devirtualized a call to vf3i32
 ; CHECK: remark: <unknown>:0:0: virtual-const-prop-1-bit: devirtualized a call to vf0i1
 ; CHECK: remark: <unknown>:0:0: virtual-const-prop-1-bit: devirtualized a call to vf1i1
 ; CHECK: remark: <unknown>:0:0: virtual-const-prop: devirtualized a call to vf1i32
@@ -34,28 +37,28 @@ target triple = "x86_64-unknown-linux-gnu"
 
 ; SKIP-ALL-NOT: devirtualized
 
-; CHECK: [[VT1DATA:@[^ ]*]] = private constant { [8 x i8], [3 x ptr], [0 x i8] } { [8 x i8] c"\00\00\00\01\00\00\00\02", [3 x ptr] [ptr @vf0i1, ptr @vf1i1, ptr @vf1i32], [0 x i8] zeroinitializer }, section "vt1sec", !type [[T8:![0-9]+]]
+; CHECK: [[VT1DATA:@[^ ]*]] = private constant { [8 x i8], [3 x ptr], [4 x i8] } { [8 x i8] c"\00\00\00\00\00\00\00\02", [3 x ptr] [ptr @vf0i1, ptr @vf1i1, ptr @vf1i32], [4 x i8] c"\01\00\00\00" }, section "vt1sec", !type [[T8:![0-9]+]]
 @vt1 = constant [3 x ptr] [
 ptr @vf0i1,
 ptr @vf1i1,
 ptr @vf1i32
 ], section "vt1sec", !type !0
 
-; CHECK: [[VT2DATA:@[^ ]*]] = private constant { [8 x i8], [3 x ptr], [0 x i8] } { [8 x i8] c"\00\00\00\02\00\00\00\01", [3 x ptr] [ptr @vf1i1, ptr @vf0i1, ptr @vf2i32], [0 x i8] zeroinitializer }, !type [[T8]]
+; CHECK: [[VT2DATA:@[^ ]*]] = private constant { [8 x i8], [3 x ptr], [4 x i8] } { [8 x i8] c"\00\00\00\00\00\00\00\01", [3 x ptr] [ptr @vf1i1, ptr @vf0i1, ptr @vf2i32], [4 x i8] c"\02\00\00\00" }, !type [[T8]]
 @vt2 = constant [3 x ptr] [
 ptr @vf1i1,
 ptr @vf0i1,
 ptr @vf2i32
 ], !type !0
 
-; CHECK: [[VT3DATA:@[^ ]*]] = private constant { [8 x i8], [3 x ptr], [0 x i8] } { [8 x i8] c"\00\00\00\03\00\00\00\02", [3 x ptr] [ptr @vf0i1, ptr @vf1i1, ptr @vf3i32], [0 x i8] zeroinitializer }, !type [[T8]]
+; CHECK: [[VT3DATA:@[^ ]*]] = private constant { [8 x i8], [3 x ptr], [4 x i8] } { [8 x i8] c"\00\00\00\00\00\00\00\02", [3 x ptr] [ptr @vf0i1, ptr @vf1i1, ptr @vf3i32], [4 x i8] c"\03\00\00\00" }, !type [[T8]]
 @vt3 = constant [3 x ptr] [
 ptr @vf0i1,
 ptr @vf1i1,
 ptr @vf3i32
 ], !type !0
 
-; CHECK: [[VT4DATA:@[^ ]*]] = private constant { [8 x i8], [3 x ptr], [0 x i8] } { [8 x i8] c"\00\00\00\04\00\00\00\01", [3 x ptr] [ptr @vf1i1, ptr @vf0i1, ptr @vf4i32], [0 x i8] zeroinitializer }, !type [[T8]]
+; CHECK: [[VT4DATA:@[^ ]*]] = private constant { [8 x i8], [3 x ptr], [4 x i8] } { [8 x i8] c"\00\00\00\00\00\00\00\01", [3 x ptr] [ptr @vf1i1, ptr @vf0i1, ptr @vf4i32], [4 x i8] c"\04\00\00\00" }, !type [[T8]]
 @vt4 = constant [3 x ptr] [
 ptr @vf1i1,
 ptr @vf0i1,
@@ -69,10 +72,35 @@ ptr @__cxa_pure_virtual,
 ptr @__cxa_pure_virtual
 ], !type !0
 
-; CHECK: @vt1 = alias [3 x ptr], getelementptr inbounds ({ [8 x i8], [3 x ptr], [0 x i8] }, ptr [[VT1DATA]], i32 0, i32 1)
-; CHECK: @vt2 = alias [3 x ptr], getelementptr inbounds ({ [8 x i8], [3 x ptr], [0 x i8] }, ptr [[VT2DATA]], i32 0, i32 1)
-; CHECK: @vt3 = alias [3 x ptr], getelementptr inbounds ({ [8 x i8], [3 x ptr], [0 x i8] }, ptr [[VT3DATA]], i32 0, i32 1)
-; CHECK: @vt4 = alias [3 x ptr], getelementptr inbounds ({ [8 x i8], [3 x ptr], [0 x i8] }, ptr [[VT4DATA]], i32 0, i32 1)
+;; Test relative vtables
+; CHECK:      [[VT6RELDATA:@[^ ]*]] = private constant { [4 x i8], [3 x i32], [0 x i8] } { [4 x i8] c"\03\00\00\00", [3 x i32] [
+; CHECK-SAME:     i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf0i1 to i64), i64 ptrtoint (ptr @vt6_rel to i64)) to i32),
+; CHECK-SAME:     i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf1i1 to i64), i64 ptrtoint (ptr @vt6_rel to i64)) to i32),
+; CHECK-SAME:     i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf3i32 to i64), i64 ptrtoint (ptr @vt6_rel to i64)) to i32)
+; CHECK-SAME: ], [0 x i8] zeroinitializer }, !type [[TREL:![0-9]+]]
+@vt6_rel = constant [3 x i32] [
+i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf0i1 to i64), i64 ptrtoint (ptr @vt6_rel to i64)) to i32),
+i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf1i1 to i64), i64 ptrtoint (ptr @vt6_rel to i64)) to i32),
+i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf3i32 to i64), i64 ptrtoint (ptr @vt6_rel to i64)) to i32)
+], !type !1
+
+; CHECK:      [[VT7RELDATA:@[^ ]*]] = private constant { [4 x i8], [3 x i32], [0 x i8] } { [4 x i8] c"\04\00\00\00", [3 x i32] [
+; CHECK-SAME:     i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf1i1 to i64), i64 ptrtoint (ptr @vt7_rel to i64)) to i32),
+; CHECK-SAME:     i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf0i1 to i64), i64 ptrtoint (ptr @vt7_rel to i64)) to i32),
+; CHECK-SAME:     i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf4i32 to i64), i64 ptrtoint (ptr @vt7_rel to i64)) to i32)
+; CHECK-SAME: ], [0 x i8] zeroinitializer }, !type [[TREL]]
+@vt7_rel = constant [3 x i32] [
+i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf1i1 to i64), i64 ptrtoint (ptr @vt7_rel to i64)) to i32),
+i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf0i1 to i64), i64 ptrtoint (ptr @vt7_rel to i64)) to i32),
+i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf4i32 to i64), i64 ptrtoint (ptr @vt7_rel to i64)) to i32)
+], !type !1
+
+; CHECK: @vt1 = alias [3 x ptr], getelementptr inbounds ({ [8 x i8], [3 x ptr], [4 x i8] }, ptr [[VT1DATA]], i32 0, i32 1)
+; CHECK: @vt2 = alias [3 x ptr], getelementptr inbounds ({ [8 x i8], [3 x ptr], [4 x i8] }, ptr [[VT2DATA]], i32 0, i32 1)
+; CHECK: @vt3 = alias [3 x ptr], getelementptr inbounds ({ [8 x i8], [3 x ptr], [4 x i8] }, ptr [[VT3DATA]], i32 0, i32 1)
+; CHECK: @vt4 = alias [3 x ptr], getelementptr inbounds ({ [8 x i8], [3 x ptr], [4 x i8] }, ptr [[VT4DATA]], i32 0, i32 1)
+; CHECK: @vt6_rel = alias [3 x i32], getelementptr inbounds ({ [4 x i8], [3 x i32], [0 x i8] }, ptr [[VT6RELDATA]], i32 0, i32 1)
+; CHECK: @vt7_rel = alias [3 x i32], getelementptr inbounds ({ [4 x i8], [3 x i32], [0 x i8] }, ptr [[VT7RELDATA]], i32 0, i32 1)
 
 define i1 @vf0i1(ptr %this) readnone {
   ret i1 0
@@ -137,7 +165,44 @@ define i32 @call3(ptr %obj) {
   %vtable = load ptr, ptr %obj
   %pair = call {ptr, i1} @llvm.type.checked.load(ptr %vtable, i32 16, metadata !"typeid")
   %fptr = extractvalue {ptr, i1} %pair, 0
-  ; CHECK: [[VTGEP3:%[^ ]*]] = getelementptr i8, ptr %vtable, i32 -5
+  ; CHECK: [[VTGEP3:%[^ ]*]] = getelementptr i8, ptr %vtable, i32 24
+  ; CHECK: [[VTLOAD3:%[^ ]*]] = load i32, ptr [[VTGEP3]]
+  %result = call i32 %fptr(ptr %obj)
+  ; CHECK: ret i32 [[VTLOAD3]]
+  ret i32 %result
+}
+
+; CHECK: define i1 @call1_rel(
+define i1 @call1_rel(ptr %obj) {
+  %vtable = load ptr, ptr %obj
+  %p = call i1 @llvm.type.test(ptr %vtable, metadata !"typeid2")
+  call void @llvm.assume(i1 %p)
+  %fptr = call ptr @llvm.load.relative.i32(ptr %vtable, i32 0)
+  %result = call i1 %fptr(ptr %obj)
+  ret i1 %result
+  ; CHECK: [[RES:%[^ ]*]] = icmp eq ptr %vtable, @vt7_rel
+  ; CHECK: ret i1 [[RES]]
+}
+
+; CHECK: define i1 @call2_rel(
+define i1 @call2_rel(ptr %obj) {
+  %vtable = load ptr, ptr %obj
+  %p = call i1 @llvm.type.test(ptr %vtable, metadata !"typeid2")
+  call void @llvm.assume(i1 %p)
+  %fptr = call ptr @llvm.load.relative.i32(ptr %vtable, i32 4)
+  %result = call i1 %fptr(ptr %obj)
+  ret i1 %result
+  ; CHECK: [[RES:%[^ ]*]] = icmp eq ptr %vtable, @vt6_rel
+  ; CHECK: ret i1 [[RES]]
+}
+
+; CHECK: define i32 @call3_rel(
+define i32 @call3_rel(ptr %obj) {
+  %vtable = load ptr, ptr %obj
+  %p = call i1 @llvm.type.test(ptr %vtable, metadata !"typeid2")
+  call void @llvm.assume(i1 %p)
+  %fptr = call ptr @llvm.load.relative.i32(ptr %vtable, i32 8)
+  ; CHECK: [[VTGEP3:%[^ ]*]] = getelementptr i8, ptr %vtable, i32 -4
   ; CHECK: [[VTLOAD3:%[^ ]*]] = load i32, ptr [[VTGEP3]]
   %result = call i32 %fptr(ptr %obj)
   ; CHECK: ret i32 [[VTLOAD3]]
@@ -147,12 +212,16 @@ define i32 @call3(ptr %obj) {
 declare {ptr, i1} @llvm.type.checked.load(ptr, i32, metadata)
 declare void @llvm.assume(i1)
 declare void @__cxa_pure_virtual()
+declare ptr @llvm.load.relative.i32(ptr, i32)
 
 ; CHECK: [[T8]] = !{i32 8, !"typeid"}
 ; CHECK: [[T0]] = !{i32 0, !"typeid"}
+; CHECK: [[TREL]] = !{i32 4, !"typeid2"}
 
 !0 = !{i32 0, !"typeid"}
+!1 = !{i32 0, !"typeid2"}
 
 ; CHECK: 6 wholeprogramdevirt - Number of whole program devirtualization targets
-; CHECK: 1 wholeprogramdevirt - Number of virtual constant propagations
+; CHECK: 2 wholeprogramdevirt - Number of unique return value optimizations
+; CHECK: 2 wholeprogramdevirt - Number of virtual constant propagations
 ; CHECK: 2 wholeprogramdevirt - Number of 1 bit virtual constant propagations
diff --git a/llvm/test/Transforms/WholeProgramDevirt/virtual-const-prop-end.ll b/llvm/test/Transforms/WholeProgramDevirt/virtual-const-prop-end.ll
index a28c27079f8de..dd91ff6e6f3aa 100644
--- a/llvm/test/Transforms/WholeProgramDevirt/virtual-const-prop-end.ll
+++ b/llvm/test/Transforms/WholeProgramDevirt/virtual-const-prop-end.ll
@@ -1,9 +1,8 @@
 ; RUN: opt -S -passes=wholeprogramdevirt -whole-program-visibility %s | FileCheck %s
 
 target datalayout = "e-p:64:64"
-target triple = "x86_64-unknown-linux-gnu"
 
-; CHECK: [[VT1DATA:@[^ ]*]] = private constant { [0 x i8], [4 x ptr], [5 x i8] } { [0 x i8] zeroinitializer, [4 x ptr] [ptr null, ptr @vf0i1, ptr @vf1i1, ptr @vf1i32], [5 x i8] c"\02\01\00\00\00" }, !type [[T8:![0-9]+]]
+; CHECK: [[VT1DATA:@[^ ]*]] = private constant { [0 x i8], [4 x ptr], [8 x i8] } { [0 x i8] zeroinitializer, [4 x ptr] [ptr null, ptr @vf0i1, ptr @vf1i1, ptr @vf1i32], [8 x i8] c"\02\00\00\00\03\00\00\00" }, !type [[T8:![0-9]+]]
 @vt1 = constant [4 x ptr] [
 ptr null,
 ptr @vf0i1,
@@ -11,14 +10,14 @@ ptr @vf1i1,
 ptr @vf1i32
 ], !type !1
 
-; CHECK: [[VT2DATA:@[^ ]*]] = private constant { [0 x i8], [3 x ptr], [5 x i8] } { [0 x i8] zeroinitializer, [3 x ptr] [ptr @vf1i1, ptr @vf0i1, ptr @vf2i32], [5 x i8] c"\01\02\00\00\00" }, !type [[T0:![0-9]+]]
+; CHECK: [[VT2DATA:@[^ ]*]] = private constant { [0 x i8], [3 x ptr], [8 x i8] } { [0 x i8] zeroinitializer, [3 x ptr] [ptr @vf1i1, ptr @vf0i1, ptr @vf2i32], [8 x i8] c"\01\00\00\00\04\00\00\00" }, !type [[T0:![0-9]+]]
 @vt2 = constant [3 x ptr] [
 ptr @vf1i1,
 ptr @vf0i1,
 ptr @vf2i32
 ], !type !0
 
-; CHECK: [[VT3DATA:@[^ ]*]] = private constant { [0 x i8], [4 x ptr], [5 x i8] } { [0 x i8] zeroinitializer, [4 x ptr] [ptr null, ptr @vf0i1, ptr @vf1i1, ptr @vf3i32], [5 x i8] c"\02\03\00\00\00" }, !type [[T8]]
+; CHECK: [[VT3DATA:@[^ ]*]] = private constant { [0 x i8], [4 x ptr], [8 x i8] } { [0 x i8] zeroinitializer, [4 x ptr] [ptr null, ptr @vf0i1, ptr @vf1i1, ptr @vf3i32], [8 x i8] c"\02\00\00\00\05\00\00\00" }, !type [[T8]]
 @vt3 = constant [4 x ptr] [
 ptr null,
 ptr @vf0i1,
@@ -26,17 +25,42 @@ ptr @vf1i1,
 ptr @vf3i32
 ], !type !1
 
-; CHECK: [[VT4DATA:@[^ ]*]] = private constant { [0 x i8], [3 x ptr], [5 x i8] } { [0 x i8] zeroinitializer, [3 x ptr] [ptr @vf1i1, ptr @vf0i1, ptr @vf4i32], [5 x i8] c"\01\04\00\00\00" }, !type [[T0]]
+; CHECK: [[VT4DATA:@[^ ]*]] = private constant { [0 x i8], [3 x ptr], [8 x i8] } { [0 x i8] zeroinitializer, [3 x ptr] [ptr @vf1i1, ptr @vf0i1, ptr @vf4i32], [8 x i8] c"\01\00\00\00\06\00\00\00" }, !type [[T0]]
 @vt4 = constant [3 x ptr] [
 ptr @vf1i1,
 ptr @vf0i1,
 ptr @vf4i32
 ], !type !0
 
-; CHECK: @vt1 = alias [4 x ptr], getelementptr inbounds ({ [0 x i8], [4 x ptr], [5 x i8] }, ptr [[VT1DATA]], i32 0, i32 1)
-; CHECK: @vt2 = alias [3 x ptr], getelementptr inbounds ({ [0 x i8], [3 x ptr], [5 x i8] }, ptr [[VT2DATA]], i32 0, i32 1)
-; CHECK: @vt3 = alias [4 x ptr], getelementptr inbounds ({ [0 x i8], [4 x ptr], [5 x i8] }, ptr [[VT3DATA]], i32 0, i32 1)
-; CHECK: @vt4 = alias [3 x ptr], getelementptr inbounds ({ [0 x i8], [3 x ptr], [5 x i8] }, ptr [[VT4DATA]], i32 0, i32 1)
+;; Test relative vtables
+; CHECK:      [[VT6RELDATA:@[^ ]*]] = private constant { [0 x i8], [4 x i32], [4 x i8] } { [0 x i8] zeroinitializer, [4 x i32] [
+; CHECK-SAME:     i32 0,
+; CHECK-SAME:     i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf1i1 to i64), i64 ptrtoint (ptr @vt5_rel to i64)) to i32),
+; CHECK-SAME:     i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf0i1 to i64), i64 ptrtoint (ptr @vt5_rel to i64)) to i32),
+; CHECK-SAME:     i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf2i32 to i64), i64 ptrtoint (ptr @vt5_rel to i64)) to i32)
+; CHECK-SAME: ], [4 x i8] c"\04\00\00\00" }, !type [[TREL:![0-9]+]]
+@vt5_rel = constant [4 x i32] [
+i32 zeroinitializer,
+i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf1i1 to i64), i64 ptrtoint (ptr @vt5_rel to i64)) to i32),
+i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf0i1 to i64), i64 ptrtoint (ptr @vt5_rel to i64)) to i32),
+i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf2i32 to i64), i64 ptrtoint (ptr @vt5_rel to i64)) to i32)
+], !type !3
+
+; CHECK:      [[VT7RELDATA:@[^ ]*]] = private constant { [0 x i8], [3 x i32], [4 x i8] } { [0 x i8] zeroinitializer, [3 x i32] [
+; CHECK-SAME:     i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf0i1 to i64), i64 ptrtoint (ptr @vt6_rel to i64)) to i32),
+; CHECK-SAME:     i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf1i1 to i64), i64 ptrtoint (ptr @vt6_rel to i64)) to i32),
+; CHECK-SAME:     i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf4i32 to i64), i64 ptrtoint (ptr @vt6_rel to i64)) to i32)
+; CHECK-SAME: ], [4 x i8] c"\06\00\00\00" }, !type [[TREL2:![0-9]+]]
+@vt6_rel = constant [3 x i32] [
+i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf0i1 to i64), i64 ptrtoint (ptr @vt6_rel to i64)) to i32),
+i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf1i1 to i64), i64 ptrtoint (ptr @vt6_rel to i64)) to i32),
+i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf4i32 to i64), i64 ptrtoint (ptr @vt6_rel to i64)) to i32)
+], !type !2
+
+; CHECK: @vt1 = alias [4 x ptr], getelementptr inbounds ({ [0 x i8], [4 x ptr], [8 x i8] }, ptr [[VT1DATA]], i32 0, i32 1)
+; CHECK: @vt2 = alias [3 x ptr], getelementptr inbounds ({ [0 x i8], [3 x ptr], [8 x i8] }, ptr [[VT2DATA]], i32 0, i32 1)
+; CHECK: @vt3 = alias [4 x ptr], getelementptr inbounds ({ [0 x i8], [4 x ptr], [8 x i8] }, ptr [[VT3DATA]], i32 0, i32 1)
+; CHECK: @vt4 = alias [3 x ptr], getelementptr inbounds ({ [0 x i8], [3 x ptr], [8 x i8] }, ptr [[VT4DATA]], i32 0, i32 1)
 
 define i1 @vf0i1(ptr %this) readnone {
   ret i1 0
@@ -47,19 +71,19 @@ define i1 @vf1i1(ptr %this) readnone {
 }
 
 define i32 @vf1i32(ptr %this) readnone {
-  ret i32 1
+  ret i32 3
 }
 
 define i32 @vf2i32(ptr %this) readnone {
-  ret i32 2
+  ret i32 4
 }
 
 define i32 @vf3i32(ptr %this) readnone {
-  ret i32 3
+  ret i32 5
 }
 
 define i32 @vf4i32(ptr %this) readnone {
-  ret i32 4
+  ret i32 6
 }
 
 ; CHECK: define i1 @call1(
@@ -100,7 +124,44 @@ define i32 @call3(ptr %obj) {
   call void @llvm.assume(i1 %p)
   %fptrptr = getelementptr [3 x ptr], ptr %vtable, i32 0, i32 2
   %fptr = load ptr, ptr %fptrptr
-  ; CHECK: [[VTGEP3:%[^ ]*]] = getelementptr i8, ptr %vtable, i32 25
+  ; CHECK: [[VTGEP3:%[^ ]*]] = getelementptr i8, ptr %vtable, i32 28
+  ; CHECK: [[VTLOAD3:%[^ ]*]] = load i32, ptr [[VTGEP3]]
+  %result = call i32 %fptr(ptr %obj)
+  ; CHECK: ret i32 [[VTLOAD3]]
+  ret i32 %result
+}
+
+; CHECK: define i1 @call1_rel(
+define i1 @call1_rel(ptr %obj) {
+  %vtable = load ptr, ptr %obj
+  %p = call i1 @llvm.type.test(ptr %vtable, metadata !"typeid2")
+  call void @llvm.assume(i1 %p)
+  %fptr = call ptr @llvm.load.relative.i32(ptr %vtable, i32 0)
+  %result = call i1 %fptr(ptr %obj)
+  ret i1 %result
+  ; CHECK: [[RES:%.*]] = icmp eq ptr %vtable, getelementptr (i8, ptr @vt5_rel, i64 4)
+  ; CHECK: ret i1 [[RES]]
+}
+
+; CHECK: define i1 @call2_rel(
+define i1 @call2_rel(ptr %obj) {
+  %vtable = load ptr, ptr %obj
+  %p = call i1 @llvm.type.test(ptr %vtable, metadata !"typeid2")
+  call void @llvm.assume(i1 %p)
+  %fptr = call ptr @llvm.load.relative.i32(ptr %vtable, i32 4)
+  %result = call i1 %fptr(ptr %obj)
+  ret i1 %result
+  ; CHECK: [[RES:%[^ ]*]] = icmp eq ptr %vtable, @vt6_rel
+  ; CHECK: ret i1 [[RES]]
+}
+
+; CHECK: define i32 @call3_rel(
+define i32 @call3_rel(ptr %obj) {
+  %vtable = load ptr, ptr %obj
+  %p = call i1 @llvm.type.test(ptr %vtable, metadata !"typeid2")
+  call void @llvm.assume(i1 %p)
+  %fptr = call ptr @llvm.load.relative.i32(ptr %vtable, i32 8)
+  ; CHECK: [[VTGEP3:%[^ ]*]] = getelementptr i8, ptr %vtable, i32 12
   ; CHECK: [[VTLOAD3:%[^ ]*]] = load i32, ptr [[VTGEP3]]
   %result = call i32 %fptr(ptr %obj)
   ; CHECK: ret i32 [[VTLOAD3]]
@@ -112,6 +173,10 @@ declare void @llvm.assume(i1)
 
 ; CHECK: [[T8]] = !{i32 8, !"typeid"}
 ; CHECK: [[T0]] = !{i32 0, !"typeid"}
+; CHECK: [[TREL]] = !{i32 4, !"typeid2"}
+; CHECK: [[TREL2]] = !{i32 0, !"typeid2"}
 
 !0 = !{i32 0, !"typeid"}
 !1 = !{i32 8, !"typeid"}
+!2 = !{i32 0, !"typeid2"}
+!3 = !{i32 4, !"typeid2"}
diff --git a/llvm/test/Transforms/WholeProgramDevirt/virtual-const-prop-small-alignment-32.ll b/llvm/test/Transforms/WholeProgramDevirt/virtual-const-prop-small-alignment-32.ll
new file mode 100644
index 0000000000000..52a31a28d0537
--- /dev/null
+++ b/llvm/test/Transforms/WholeProgramDevirt/virtual-const-prop-small-alignment-32.ll
@@ -0,0 +1,291 @@
+; RUN: opt -S -passes=wholeprogramdevirt -whole-program-visibility %s | FileCheck %s
+
+;; This target uses 32-bit sized and aligned pointers.
+target datalayout = "e-p:32:32"
+
+;; Constant propagation should be agnostic towards sections.
+;; Also the new global should be in the original vtable's section.
+; CHECK: [[VT1DATA:@[^ ]*]] = private constant { [4 x i8], [3 x ptr], [0 x i8] } { [4 x i8] c"\00\00\03\00", [3 x ptr] [ptr @vf0i1, ptr @vf1i8, ptr @vf1i32], [0 x i8] zeroinitializer }, section "vt1sec", !type [[T8:![0-9]+]]
+@vt1 = constant [3 x ptr] [
+ptr @vf0i1,
+ptr @vf1i8,
+ptr @vf1i32
+], section "vt1sec", !type !0
+
+;; This represents a normal vtable using the default ABI alignments.
+;; For this test, the pointers are 32-bit aligned.
+; CHECK: [[VT2DATA:@[^ ]*]] = private constant { [4 x i8], [3 x ptr], [0 x i8] } { [4 x i8] c"\00\00\02\01", [3 x ptr] [ptr @vf1i1, ptr @vf0i8, ptr @vf2i32], [0 x i8] zeroinitializer }, !type [[T8]]
+@vt2 = constant [3 x ptr] [
+ptr @vf1i1,
+ptr @vf0i8,
+ptr @vf2i32
+], !type !0
+
+;; This represents an underaligned vtable, which prevents constant propagation
+;; for any function returning an integer whose alignment exceeds the vtable's.
+;; This and other vtables cannot store a 32-bit integer at the same offset
+;; because we cannot guarantee that a load from this vtable will be suitably
+;; aligned.
+;;
+;; All the functions returning i8s and i1s should still be constant-propagated
+;; because we can still do an aligned load regardless of where the 1-byte aligned
+;; vtable is.
+; CHECK: [[VT3DATA:@[^ ]*]] = private constant { [2 x i8], [3 x ptr], [0 x i8] } { [2 x i8] c"\03\00", [3 x ptr] [ptr @vf0i1, ptr @vf1i8, ptr @vf3i32], [0 x i8] zeroinitializer }, align 1, !type [[T5:![0-9]+]]
+@vt3 = constant [3 x ptr] [
+ptr @vf0i1,
+ptr @vf1i8,
+ptr @vf3i32
+], align 1, !type !0
+
+;; This represents an overaligned vtable.
+;; Although all the integers in this test can fit within an alignment smaller
+;; than the 16-byte vtable alignment, the overalignment does not prevent
+;; constant propagation.
+; CHECK: [[VT4DATA:@[^ ]*]] = private constant { [16 x i8], [3 x ptr], [0 x i8] } { [16 x i8] c"\00\00\00\00\00\00\00\00\00\00\00\00\00\00\02\01", [3 x ptr] [ptr @vf1i1, ptr @vf0i8, ptr @vf4i32], [0 x i8] zeroinitializer },  align 16, !type [[T16:![0-9]+]]
+@vt4 = constant [3 x ptr] [
+ptr @vf1i1,
+ptr @vf0i8,
+ptr @vf4i32
+], align 16, !type !0
+
+;; These contain a mix of different integral type sizes. Since the natural
+;; alignment of the vtable is only 32 bits, the 64-bit constant cannot be
+;; propagated.
+; CHECK: [[VT6DATA:@[^ ]*]] = private constant { [4 x i8], [3 x ptr], [0 x i8] } { [4 x i8] c"\00\00\00\0B", [3 x ptr] [ptr @vf0i1, ptr @vf10i8, ptr @vf5i64], [0 x i8] zeroinitializer }, !type [[T1:![0-9]+]]
+@vt6 = constant [3 x ptr] [
+ptr @vf0i1,
+ptr @vf10i8,
+ptr @vf5i64
+], !type !1
+
+; CHECK: [[VT7DATA:@[^ ]*]] = private constant { [4 x i8], [3 x ptr], [0 x i8] } { [4 x i8] c"\00\00\00\0A", [3 x ptr] [ptr @vf1i1, ptr @vf9i8, ptr @vf6i64], [0 x i8] zeroinitializer }, !type [[T1]]
+@vt7 = constant [3 x ptr] [
+ptr @vf1i1,
+ptr @vf9i8,
+ptr @vf6i64
+], !type !1
+
+;; Test relative vtables
+; CHECK:      [[VT6RELDATA:@[^ ]*]] = private constant { [4 x i8], [3 x i32], [0 x i8] } { [4 x i8] c"\00\00\00\0B", [3 x i32] [
+; CHECK-SAME:     i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf0i1 to i64), i64 ptrtoint (ptr @vt6_rel to i64)) to i32),
+; CHECK-SAME:     i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf10i8 to i64), i64 ptrtoint (ptr @vt6_rel to i64)) to i32),
+; CHECK-SAME:     i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf5i64 to i64), i64 ptrtoint (ptr @vt6_rel to i64)) to i32)
+; CHECK-SAME: ], [0 x i8] zeroinitializer }, !type [[TREL:![0-9]+]]
+@vt6_rel = constant [3 x i32] [
+i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf0i1 to i64), i64 ptrtoint (ptr @vt6_rel to i64)) to i32),
+i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf10i8 to i64), i64 ptrtoint (ptr @vt6_rel to i64)) to i32),
+i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf5i64 to i64), i64 ptrtoint (ptr @vt6_rel to i64)) to i32)
+], !type !2
+
+; CHECK:      [[VT7RELDATA:@[^ ]*]] = private constant { [4 x i8], [3 x i32], [0 x i8] } { [4 x i8] c"\00\00\00\0A", [3 x i32] [
+; CHECK-SAME:     i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf1i1 to i64), i64 ptrtoint (ptr @vt7_rel to i64)) to i32),
+; CHECK-SAME:     i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf9i8 to i64), i64 ptrtoint (ptr @vt7_rel to i64)) to i32),
+; CHECK-SAME:     i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf6i64 to i64), i64 ptrtoint (ptr @vt7_rel to i64)) to i32)
+; CHECK-SAME: ], [0 x i8] zeroinitializer }, !type [[TREL]]
+@vt7_rel = constant [3 x i32] [
+i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf1i1 to i64), i64 ptrtoint (ptr @vt7_rel to i64)) to i32),
+i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf9i8 to i64), i64 ptrtoint (ptr @vt7_rel to i64)) to i32),
+i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf6i64 to i64), i64 ptrtoint (ptr @vt7_rel to i64)) to i32)
+], !type !2
+
+; CHECK: @vt1 = alias [3 x ptr], getelementptr inbounds ({ [4 x i8], [3 x ptr], [0 x i8] }, ptr [[VT1DATA]], i32 0, i32 1)
+; CHECK: @vt2 = alias [3 x ptr], getelementptr inbounds ({ [4 x i8], [3 x ptr], [0 x i8] }, ptr [[VT2DATA]], i32 0, i32 1)
+; CHECK: @vt3 = alias [3 x ptr], getelementptr inbounds ({ [2 x i8], [3 x ptr], [0 x i8] }, ptr [[VT3DATA]], i32 0, i32 1)
+; CHECK: @vt4 = alias [3 x ptr], getelementptr inbounds ({ [16 x i8], [3 x ptr], [0 x i8] }, ptr [[VT4DATA]], i32 0, i32 1)
+; CHECK: @vt6 = alias [3 x ptr], getelementptr inbounds ({ [4 x i8], [3 x ptr], [0 x i8] }, ptr [[VT6DATA]], i32 0, i32 1)
+; CHECK: @vt7 = alias [3 x ptr], getelementptr inbounds ({ [4 x i8], [3 x ptr], [0 x i8] }, ptr [[VT7DATA]], i32 0, i32 1)
+; CHECK: @vt6_rel = alias [3 x i32], getelementptr inbounds ({ [4 x i8], [3 x i32], [0 x i8] }, ptr [[VT6RELDATA]], i32 0, i32 1)
+; CHECK: @vt7_rel = alias [3 x i32], getelementptr inbounds ({ [4 x i8], [3 x i32], [0 x i8] }, ptr [[VT7RELDATA]], i32 0, i32 1)
+
+define i1 @vf0i1(ptr %this) readnone {
+  ret i1 0
+}
+
+define i1 @vf1i1(ptr %this) readnone {
+  ret i1 1
+}
+
+define i8 @vf0i8(ptr %this) readnone {
+  ret i8 2
+}
+
+define i8 @vf1i8(ptr %this) readnone {
+  ret i8 3
+}
+
+define i32 @vf1i32(ptr %this) readnone {
+  ret i32 1
+}
+
+define i32 @vf2i32(ptr %this) readnone {
+  ret i32 2
+}
+
+define i32 @vf3i32(ptr %this) readnone {
+  ret i32 3
+}
+
+define i32 @vf4i32(ptr %this) readnone {
+  ret i32 4
+}
+
+define i64 @vf5i64(ptr %this) readnone {
+  ret i64 5
+}
+
+define i64 @vf6i64(ptr %this) readnone {
+  ret i64 6
+}
+
+define i16 @vf7i16(ptr %this) readnone {
+  ret i16 7
+}
+
+define i16 @vf8i16(ptr %this) readnone {
+  ret i16 8
+}
+
+define i8 @vf9i8(ptr %this) readnone {
+  ret i8 10
+}
+
+define i8 @vf10i8(ptr %this) readnone {
+  ret i8 11
+}
+
+; CHECK: define i1 @call1(
+define i1 @call1(ptr %obj) {
+  %vtable = load ptr, ptr %obj
+  %p = call i1 @llvm.type.test(ptr %vtable, metadata !"typeid")
+  call void @llvm.assume(i1 %p)
+  %fptr = load ptr, ptr %vtable
+  ; CHECK: [[VTGEP1:%[^ ]*]] = getelementptr i8, ptr %vtable, i32 -1
+  ; CHECK: [[VTLOAD1:%[^ ]*]] = load i8, ptr [[VTGEP1]]
+  ; CHECK: [[VTAND1:%[^ ]*]] = and i8 [[VTLOAD1]], 1
+  ; CHECK: [[VTCMP1:%[^ ]*]] = icmp ne i8 [[VTAND1]], 0
+  %result = call i1 %fptr(ptr %obj)
+  ; CHECK: ret i1 [[VTCMP1]]
+  ret i1 %result
+}
+
+; CHECK: define i8 @call2(
+define i8 @call2(ptr %obj) {
+  %vtable = load ptr, ptr %obj
+  %p = call i1 @llvm.type.test(ptr %vtable, metadata !"typeid")
+  call void @llvm.assume(i1 %p)
+  %fptrptr = getelementptr [3 x ptr], ptr %vtable, i32 0, i32 1
+  %fptr = load ptr, ptr %fptrptr
+  %result = call i8 %fptr(ptr %obj)
+  ret i8 %result
+  ; CHECK: [[VTGEP2:%[^ ]*]] = getelementptr i8, ptr %vtable, i32 -2
+  ; CHECK: [[VTLOAD:%[^ ]*]] = load i8, ptr [[VTGEP2]]
+  ; CHECK: ret i8 [[VTLOAD]]
+}
+
+;; We never constant-propagate this since the i32 cannot be reliably loaded
+;; without misalignment from all "typeid" vtables (due to the `align 1` vtable).
+; CHECK: define i32 @call3(
+define i32 @call3(ptr %obj) {
+  %vtable = load ptr, ptr %obj
+  %p = call i1 @llvm.type.test(ptr %vtable, metadata !"typeid")
+  call void @llvm.assume(i1 %p)
+  %fptrptr = getelementptr [3 x ptr], ptr %vtable, i32 0, i32 2
+  %fptr = load ptr, ptr %fptrptr
+  %result = call i32 %fptr(ptr %obj)
+  ret i32 %result
+  ; CHECK: [[FPTRPTR:%.*]] = getelementptr [3 x ptr], ptr %vtable, i32 0, i32 2
+  ; CHECK: [[FPTR:%.*]] = load ptr, ptr [[FPTRPTR]], align 4
+  ; CHECK: [[RES:%.*]] = call i32 [[FPTR]](ptr %obj)
+  ; CHECK: ret i32 [[RES]]
+}
+
+; CHECK: define i1 @call4(
+define i1 @call4(ptr %obj) {
+  %vtable = load ptr, ptr %obj
+  %p = call i1 @llvm.type.test(ptr %vtable, metadata !"typeid2")
+  call void @llvm.assume(i1 %p)
+  %fptrptr = getelementptr [3 x ptr], ptr %vtable, i32 0, i32 0
+  %fptr = load ptr, ptr %fptrptr
+  %result = call i1 %fptr(ptr %obj)
+  ret i1 %result
+  ; CHECK: [[RES:%[^ ]*]] = icmp eq ptr %vtable, @vt7
+  ; CHECK: ret i1 [[RES]]
+}
+
+; CHECK: define i64 @call5(
+define i64 @call5(ptr %obj) {
+  %vtable = load ptr, ptr %obj
+  %p = call i1 @llvm.type.test(ptr %vtable, metadata !"typeid2")
+  call void @llvm.assume(i1 %p)
+  %fptrptr = getelementptr [3 x ptr], ptr %vtable, i32 0, i32 2
+  %fptr = load ptr, ptr %fptrptr
+  %result = call i64 %fptr(ptr %obj)
+  ret i64 %result
+  ; CHECK: [[FPTRPTR:%.*]] = getelementptr [3 x ptr], ptr %vtable, i32 0, i32 2
+  ; CHECK: [[FPTR:%.*]] = load ptr, ptr [[FPTRPTR]], align 4
+  ; CHECK: [[RES:%.*]] = call i64 [[FPTR]](ptr %obj)
+  ; CHECK: ret i64 [[RES]]
+}
+
+; CHECK: define i8 @call6(
+define i8 @call6(ptr %obj) {
+  %vtable = load ptr, ptr %obj
+  %p = call i1 @llvm.type.test(ptr %vtable, metadata !"typeid2")
+  call void @llvm.assume(i1 %p)
+  %fptrptr = getelementptr [3 x ptr], ptr %vtable, i32 0, i32 1
+  %fptr = load ptr, ptr %fptrptr
+  %result = call i8 %fptr(ptr %obj)
+  ret i8 %result
+  ; CHECK: [[VTGEP2:%[^ ]*]] = getelementptr i8, ptr %vtable, i32 -1
+  ; CHECK: [[VTLOAD:%[^ ]*]] = load i8, ptr [[VTGEP2]]
+  ; CHECK: ret i8 [[VTLOAD]]
+}
+
+; CHECK: define i1 @call4_rel(
+define i1 @call4_rel(ptr %obj) {
+  %vtable = load ptr, ptr %obj
+  %p = call i1 @llvm.type.test(ptr %vtable, metadata !"typeid3")
+  call void @llvm.assume(i1 %p)
+  %fptr = call ptr @llvm.load.relative.i32(ptr %vtable, i32 0)
+  %result = call i1 %fptr(ptr %obj)
+  ret i1 %result
+  ; CHECK: [[RES:%[^ ]*]] = icmp eq ptr %vtable, @vt7_rel
+  ; CHECK: ret i1 [[RES]]
+}
+
+; CHECK: define i64 @call5_rel(
+define i64 @call5_rel(ptr %obj) {
+  %vtable = load ptr, ptr %obj
+  %p = call i1 @llvm.type.test(ptr %vtable, metadata !"typeid3")
+  call void @llvm.assume(i1 %p)
+  %fptr = call ptr @llvm.load.relative.i32(ptr %vtable, i32 8)
+  %result = call i64 %fptr(ptr %obj)
+  ret i64 %result
+  ; CHECK: [[FPTR:%.*]] = call ptr @llvm.load.relative.i32(ptr %vtable, i32 8)
+  ; CHECK: [[RES:%.*]] = call i64 [[FPTR]](ptr %obj)
+  ; CHECK: ret i64 [[RES]]
+}
+
+; CHECK: define i8 @call6_rel(
+define i8 @call6_rel(ptr %obj) {
+  %vtable = load ptr, ptr %obj
+  %p = call i1 @llvm.type.test(ptr %vtable, metadata !"typeid3")
+  call void @llvm.assume(i1 %p)
+  %fptr = call ptr @llvm.load.relative.i32(ptr %vtable, i32 4)
+  %result = call i8 %fptr(ptr %obj)
+  ret i8 %result
+  ; CHECK: [[VTGEP2:%[^ ]*]] = getelementptr i8, ptr %vtable, i32 -1
+  ; CHECK: [[VTLOAD:%[^ ]*]] = load i8, ptr [[VTGEP2]]
+  ; CHECK: ret i8 [[VTLOAD]]
+}
+
+declare i1 @llvm.type.test(ptr, metadata)
+declare void @llvm.assume(i1)
+declare void @__cxa_pure_virtual()
+declare ptr @llvm.load.relative.i32(ptr, i32)
+
+; CHECK: [[T8]] = !{i32 4, !"typeid"}
+; CHECK: [[T5]] = !{i32 2, !"typeid"}
+; CHECK: [[T16]] = !{i32 16, !"typeid"}
+; CHECK: [[T1]] = !{i32 4, !"typeid2"}
+; CHECK: [[TREL]] = !{i32 4, !"typeid3"}
+
+!0 = !{i32 0, !"typeid"}
+!1 = !{i32 0, !"typeid2"}
+!2 = !{i32 0, !"typeid3"}
diff --git a/llvm/test/Transforms/WholeProgramDevirt/virtual-const-prop-small-alignment-64.ll b/llvm/test/Transforms/WholeProgramDevirt/virtual-const-prop-small-alignment-64.ll
new file mode 100644
index 0000000000000..f908eec0e2d80
--- /dev/null
+++ b/llvm/test/Transforms/WholeProgramDevirt/virtual-const-prop-small-alignment-64.ll
@@ -0,0 +1,292 @@
+; RUN: opt -S -passes=wholeprogramdevirt -whole-program-visibility %s | FileCheck %s
+
+;; This target uses 64-bit sized and aligned pointers.
+target datalayout = "e-p:64:64"
+
+;; Constant propagation should be agnostic towards sections.
+;; Also the new global should be in the original vtable's section.
+; CHECK: [[VT1DATA:@[^ ]*]] = private constant { [8 x i8], [3 x ptr], [0 x i8] } { [8 x i8] c"\00\00\00\00\00\00\03\00", [3 x ptr] [ptr @vf0i1, ptr @vf1i8, ptr @vf1i32], [0 x i8] zeroinitializer }, section "vt1sec", !type [[T8:![0-9]+]]
+@vt1 = constant [3 x ptr] [
+ptr @vf0i1,
+ptr @vf1i8,
+ptr @vf1i32
+], section "vt1sec", !type !0
+
+;; This represents a normal vtable using the default ABI alignments.
+;; For this test, the pointers are 64-bit aligned.
+; CHECK: [[VT2DATA:@[^ ]*]] = private constant { [8 x i8], [3 x ptr], [0 x i8] } { [8 x i8] c"\00\00\00\00\00\00\02\01", [3 x ptr] [ptr @vf1i1, ptr @vf0i8, ptr @vf2i32], [0 x i8] zeroinitializer }, !type [[T8]]
+@vt2 = constant [3 x ptr] [
+ptr @vf1i1,
+ptr @vf0i8,
+ptr @vf2i32
+], !type !0
+
+;; This represents an underaligned vtable, which prevents constant propagation
+;; for any function returning an integer whose alignment exceeds the vtable's.
+;; This and other vtables cannot store a 32-bit integer at the same offset
+;; because we cannot guarantee that a load from this vtable will be suitably
+;; aligned.
+;;
+;; All the functions returning i8s and i1s should still be constant-propagated
+;; because we can still do an aligned load regardless of where the 1-byte aligned
+;; vtable is.
+; CHECK: [[VT3DATA:@[^ ]*]] = private constant { [2 x i8], [3 x ptr], [0 x i8] } { [2 x i8] c"\03\00", [3 x ptr] [ptr @vf0i1, ptr @vf1i8, ptr @vf3i32], [0 x i8] zeroinitializer }, align 1, !type [[T5:![0-9]+]]
+@vt3 = constant [3 x ptr] [
+ptr @vf0i1,
+ptr @vf1i8,
+ptr @vf3i32
+], align 1, !type !0
+
+;; This represents an overaligned vtable.
+;; Although all the integers in this test can fit within an alignment smaller
+;; than the 16-byte vtable alignment, the overalignment does not prevent
+;; constant propagation.
+; CHECK: [[VT4DATA:@[^ ]*]] = private constant { [16 x i8], [3 x ptr], [0 x i8] } { [16 x i8] c"\00\00\00\00\00\00\00\00\00\00\00\00\00\00\02\01", [3 x ptr] [ptr @vf1i1, ptr @vf0i8, ptr @vf4i32], [0 x i8] zeroinitializer },  align 16, !type [[T16:![0-9]+]]
+@vt4 = constant [3 x ptr] [
+ptr @vf1i1,
+ptr @vf0i8,
+ptr @vf4i32
+], align 16, !type !0
+
+;; These contain a mix of different integral type sizes. All of these should
+;; be constant-propagated since each constant can always be loaded at its
+;; required alignment, because the vtable is aligned to a large enough value.
+; CHECK: [[VT6DATA:@[^ ]*]] = private constant { [16 x i8], [3 x ptr], [0 x i8] } { [16 x i8] c"\00\00\00\00\00\00\00\0B\05\00\00\00\00\00\00\00", [3 x ptr] [ptr @vf0i1, ptr @vf10i8, ptr @vf5i64], [0 x i8] zeroinitializer }, !type [[T1:![0-9]+]]
+@vt6 = constant [3 x ptr] [
+ptr @vf0i1,
+ptr @vf10i8,
+ptr @vf5i64
+], !type !1
+
+; CHECK: [[VT7DATA:@[^ ]*]] = private constant { [16 x i8], [3 x ptr], [0 x i8] } { [16 x i8] c"\00\00\00\00\00\00\00\0A\06\00\00\00\00\00\00\00", [3 x ptr] [ptr @vf1i1, ptr @vf9i8, ptr @vf6i64], [0 x i8] zeroinitializer }, !type [[T1]]
+@vt7 = constant [3 x ptr] [
+ptr @vf1i1,
+ptr @vf9i8,
+ptr @vf6i64
+], !type !1
+
+;; Test relative vtables
+; CHECK:      [[VT6RELDATA:@[^ ]*]] = private constant { [4 x i8], [3 x i32], [0 x i8] } { [4 x i8] c"\00\00\00\0B", [3 x i32] [
+; CHECK-SAME:     i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf0i1 to i64), i64 ptrtoint (ptr @vt6_rel to i64)) to i32),
+; CHECK-SAME:     i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf10i8 to i64), i64 ptrtoint (ptr @vt6_rel to i64)) to i32),
+; CHECK-SAME:     i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf5i64 to i64), i64 ptrtoint (ptr @vt6_rel to i64)) to i32)
+; CHECK-SAME: ], [0 x i8] zeroinitializer }, !type [[TREL:![0-9]+]]
+@vt6_rel = constant [3 x i32] [
+i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf0i1 to i64), i64 ptrtoint (ptr @vt6_rel to i64)) to i32),
+i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf10i8 to i64), i64 ptrtoint (ptr @vt6_rel to i64)) to i32),
+i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf5i64 to i64), i64 ptrtoint (ptr @vt6_rel to i64)) to i32)
+], !type !2
+
+; CHECK:      [[VT7RELDATA:@[^ ]*]] = private constant { [4 x i8], [3 x i32], [0 x i8] } { [4 x i8] c"\00\00\00\0A", [3 x i32] [
+; CHECK-SAME:     i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf1i1 to i64), i64 ptrtoint (ptr @vt7_rel to i64)) to i32),
+; CHECK-SAME:     i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf9i8 to i64), i64 ptrtoint (ptr @vt7_rel to i64)) to i32),
+; CHECK-SAME:     i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf6i64 to i64), i64 ptrtoint (ptr @vt7_rel to i64)) to i32)
+; CHECK-SAME: ], [0 x i8] zeroinitializer }, !type [[TREL]]
+@vt7_rel = constant [3 x i32] [
+i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf1i1 to i64), i64 ptrtoint (ptr @vt7_rel to i64)) to i32),
+i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf9i8 to i64), i64 ptrtoint (ptr @vt7_rel to i64)) to i32),
+i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf6i64 to i64), i64 ptrtoint (ptr @vt7_rel to i64)) to i32)
+], !type !2
+
+; CHECK: @vt1 = alias [3 x ptr], getelementptr inbounds ({ [8 x i8], [3 x ptr], [0 x i8] }, ptr [[VT1DATA]], i32 0, i32 1)
+; CHECK: @vt2 = alias [3 x ptr], getelementptr inbounds ({ [8 x i8], [3 x ptr], [0 x i8] }, ptr [[VT2DATA]], i32 0, i32 1)
+; CHECK: @vt3 = alias [3 x ptr], getelementptr inbounds ({ [2 x i8], [3 x ptr], [0 x i8] }, ptr [[VT3DATA]], i32 0, i32 1)
+; CHECK: @vt4 = alias [3 x ptr], getelementptr inbounds ({ [16 x i8], [3 x ptr], [0 x i8] }, ptr [[VT4DATA]], i32 0, i32 1)
+; CHECK: @vt6 = alias [3 x ptr], getelementptr inbounds ({ [16 x i8], [3 x ptr], [0 x i8] }, ptr [[VT6DATA]], i32 0, i32 1)
+; CHECK: @vt7 = alias [3 x ptr], getelementptr inbounds ({ [16 x i8], [3 x ptr], [0 x i8] }, ptr [[VT7DATA]], i32 0, i32 1)
+; CHECK: @vt6_rel = alias [3 x i32], getelementptr inbounds ({ [4 x i8], [3 x i32], [0 x i8] }, ptr [[VT6RELDATA]], i32 0, i32 1)
+; CHECK: @vt7_rel = alias [3 x i32], getelementptr inbounds ({ [4 x i8], [3 x i32], [0 x i8] }, ptr [[VT7RELDATA]], i32 0, i32 1)
+
+define i1 @vf0i1(ptr %this) readnone {
+  ret i1 0
+}
+
+define i1 @vf1i1(ptr %this) readnone {
+  ret i1 1
+}
+
+define i8 @vf0i8(ptr %this) readnone {
+  ret i8 2
+}
+
+define i8 @vf1i8(ptr %this) readnone {
+  ret i8 3
+}
+
+define i32 @vf1i32(ptr %this) readnone {
+  ret i32 1
+}
+
+define i32 @vf2i32(ptr %this) readnone {
+  ret i32 2
+}
+
+define i32 @vf3i32(ptr %this) readnone {
+  ret i32 3
+}
+
+define i32 @vf4i32(ptr %this) readnone {
+  ret i32 4
+}
+
+define i64 @vf5i64(ptr %this) readnone {
+  ret i64 5
+}
+
+define i64 @vf6i64(ptr %this) readnone {
+  ret i64 6
+}
+
+define i16 @vf7i16(ptr %this) readnone {
+  ret i16 7
+}
+
+define i16 @vf8i16(ptr %this) readnone {
+  ret i16 8
+}
+
+define i8 @vf9i8(ptr %this) readnone {
+  ret i8 10
+}
+
+define i8 @vf10i8(ptr %this) readnone {
+  ret i8 11
+}
+
+; CHECK: define i1 @call1(
+define i1 @call1(ptr %obj) {
+  %vtable = load ptr, ptr %obj
+  %p = call i1 @llvm.type.test(ptr %vtable, metadata !"typeid")
+  call void @llvm.assume(i1 %p)
+  %fptr = load ptr, ptr %vtable
+  ; CHECK: [[VTGEP1:%[^ ]*]] = getelementptr i8, ptr %vtable, i32 -1
+  ; CHECK: [[VTLOAD1:%[^ ]*]] = load i8, ptr [[VTGEP1]]
+  ; CHECK: [[VTAND1:%[^ ]*]] = and i8 [[VTLOAD1]], 1
+  ; CHECK: [[VTCMP1:%[^ ]*]] = icmp ne i8 [[VTAND1]], 0
+  %result = call i1 %fptr(ptr %obj)
+  ; CHECK: ret i1 [[VTCMP1]]
+  ret i1 %result
+}
+
+; CHECK: define i8 @call2(
+define i8 @call2(ptr %obj) {
+  %vtable = load ptr, ptr %obj
+  %p = call i1 @llvm.type.test(ptr %vtable, metadata !"typeid")
+  call void @llvm.assume(i1 %p)
+  %fptrptr = getelementptr [3 x ptr], ptr %vtable, i32 0, i32 1
+  %fptr = load ptr, ptr %fptrptr
+  %result = call i8 %fptr(ptr %obj)
+  ret i8 %result
+  ; CHECK: [[VTGEP2:%[^ ]*]] = getelementptr i8, ptr %vtable, i32 -2
+  ; CHECK: [[VTLOAD:%[^ ]*]] = load i8, ptr [[VTGEP2]]
+  ; CHECK: ret i8 [[VTLOAD]]
+}
+
+;; We never constant-propagate this since the i32 cannot be reliably loaded
+;; without misalignment from all "typeid" vtables (due to the `align 1` vtable).
+; CHECK: define i32 @call3(
+define i32 @call3(ptr %obj) {
+  %vtable = load ptr, ptr %obj
+  %p = call i1 @llvm.type.test(ptr %vtable, metadata !"typeid")
+  call void @llvm.assume(i1 %p)
+  %fptrptr = getelementptr [3 x ptr], ptr %vtable, i32 0, i32 2
+  %fptr = load ptr, ptr %fptrptr
+  %result = call i32 %fptr(ptr %obj)
+  ret i32 %result
+  ; CHECK: [[FPTRPTR:%.*]] = getelementptr [3 x ptr], ptr %vtable, i32 0, i32 2
+  ; CHECK: [[FPTR:%.*]] = load ptr, ptr [[FPTRPTR]], align 8
+  ; CHECK: [[RES:%.*]] = call i32 [[FPTR]](ptr %obj)
+  ; CHECK: ret i32 [[RES]]
+}
+
+; CHECK: define i1 @call4(
+define i1 @call4(ptr %obj) {
+  %vtable = load ptr, ptr %obj
+  %p = call i1 @llvm.type.test(ptr %vtable, metadata !"typeid2")
+  call void @llvm.assume(i1 %p)
+  %fptrptr = getelementptr [3 x ptr], ptr %vtable, i32 0, i32 0
+  %fptr = load ptr, ptr %fptrptr
+  %result = call i1 %fptr(ptr %obj)
+  ret i1 %result
+  ; CHECK: [[RES:%[^ ]*]] = icmp eq ptr %vtable, @vt7
+  ; CHECK: ret i1 [[RES]]
+}
+
+; CHECK: define i64 @call5(
+define i64 @call5(ptr %obj) {
+  %vtable = load ptr, ptr %obj
+  %p = call i1 @llvm.type.test(ptr %vtable, metadata !"typeid2")
+  call void @llvm.assume(i1 %p)
+  %fptrptr = getelementptr [3 x ptr], ptr %vtable, i32 0, i32 2
+  %fptr = load ptr, ptr %fptrptr
+  %result = call i64 %fptr(ptr %obj)
+  ret i64 %result
+  ; CHECK: [[VTGEP2:%[^ ]*]] = getelementptr i8, ptr %vtable, i32 -8
+  ; CHECK: [[VTLOAD:%[^ ]*]] = load i64, ptr [[VTGEP2]]
+  ; CHECK: ret i64 [[VTLOAD]]
+}
+
+; CHECK: define i8 @call6(
+define i8 @call6(ptr %obj) {
+  %vtable = load ptr, ptr %obj
+  %p = call i1 @llvm.type.test(ptr %vtable, metadata !"typeid2")
+  call void @llvm.assume(i1 %p)
+  %fptrptr = getelementptr [3 x ptr], ptr %vtable, i32 0, i32 1
+  %fptr = load ptr, ptr %fptrptr
+  %result = call i8 %fptr(ptr %obj)
+  ret i8 %result
+  ; CHECK: [[VTGEP2:%[^ ]*]] = getelementptr i8, ptr %vtable, i32 -9
+  ; CHECK: [[VTLOAD:%[^ ]*]] = load i8, ptr [[VTGEP2]]
+  ; CHECK: ret i8 [[VTLOAD]]
+}
+
+; CHECK: define i1 @call4_rel(
+define i1 @call4_rel(ptr %obj) {
+  %vtable = load ptr, ptr %obj
+  %p = call i1 @llvm.type.test(ptr %vtable, metadata !"typeid3")
+  call void @llvm.assume(i1 %p)
+  %fptr = call ptr @llvm.load.relative.i32(ptr %vtable, i32 0)
+  %result = call i1 %fptr(ptr %obj)
+  ret i1 %result
+  ; CHECK: [[RES:%[^ ]*]] = icmp eq ptr %vtable, @vt7_rel
+  ; CHECK: ret i1 [[RES]]
+}
+
+; CHECK: define i64 @call5_rel(
+define i64 @call5_rel(ptr %obj) {
+  %vtable = load ptr, ptr %obj
+  %p = call i1 @llvm.type.test(ptr %vtable, metadata !"typeid3")
+  call void @llvm.assume(i1 %p)
+  %fptr = call ptr @llvm.load.relative.i32(ptr %vtable, i32 8)
+  %result = call i64 %fptr(ptr %obj)
+  ret i64 %result
+  ; CHECK: [[FPTR:%.*]] = call ptr @llvm.load.relative.i32(ptr %vtable, i32 8)
+  ; CHECK: [[RES:%.*]] = call i64 [[FPTR]](ptr %obj)
+  ; CHECK: ret i64 [[RES]]
+}
+
+; CHECK: define i8 @call6_rel(
+define i8 @call6_rel(ptr %obj) {
+  %vtable = load ptr, ptr %obj
+  %p = call i1 @llvm.type.test(ptr %vtable, metadata !"typeid3")
+  call void @llvm.assume(i1 %p)
+  %fptr = call ptr @llvm.load.relative.i32(ptr %vtable, i32 4)
+  %result = call i8 %fptr(ptr %obj)
+  ret i8 %result
+  ; CHECK: [[VTGEP2:%[^ ]*]] = getelementptr i8, ptr %vtable, i32 -1
+  ; CHECK: [[VTLOAD:%[^ ]*]] = load i8, ptr [[VTGEP2]]
+  ; CHECK: ret i8 [[VTLOAD]]
+}
+
+declare i1 @llvm.type.test(ptr, metadata)
+declare void @llvm.assume(i1)
+declare void @__cxa_pure_virtual()
+declare ptr @llvm.load.relative.i32(ptr, i32)
+
+; CHECK: [[T8]] = !{i32 8, !"typeid"}
+; CHECK: [[T5]] = !{i32 2, !"typeid"}
+; CHECK: [[T16]] = !{i32 16, !"typeid"}
+; CHECK: [[T1]] = !{i32 16, !"typeid2"}
+; CHECK: [[TREL]] = !{i32 4, !"typeid3"}
+
+!0 = !{i32 0, !"typeid"}
+!1 = !{i32 0, !"typeid2"}
+!2 = !{i32 0, !"typeid3"}
diff --git a/llvm/unittests/Transforms/IPO/WholeProgramDevirt.cpp b/llvm/unittests/Transforms/IPO/WholeProgramDevirt.cpp
index 8fae05bef2ae1..59cc5bc2c4e6f 100644
--- a/llvm/unittests/Transforms/IPO/WholeProgramDevirt.cpp
+++ b/llvm/unittests/Transforms/IPO/WholeProgramDevirt.cpp
@@ -55,7 +55,7 @@ TEST(WholeProgramDevirt, findLowestOffset) {
   VT1.After.BytesUsed = {0xff, 0, 0, 0, 0xff};
   VT2.After.BytesUsed = {0xff, 1, 0, 0, 0};
   EXPECT_EQ(16ull, findLowestOffset(Targets, /*IsAfter=*/true, 16));
-  EXPECT_EQ(40ull, findLowestOffset(Targets, /*IsAfter=*/true, 32));
+  EXPECT_EQ(64ull, findLowestOffset(Targets, /*IsAfter=*/true, 32));
 }
 
 TEST(WholeProgramDevirt, setReturnValues) {


