[llvm] 2661e99 - [llvm] Ensure propagated constants in the vtable are aligned (#136630)
via llvm-commits
llvm-commits at lists.llvm.org
Thu May 15 11:52:29 PDT 2025
Author: PiJoules
Date: 2025-05-15T11:52:25-07:00
New Revision: 2661e995ceebd6fd083e5b62aeff21e67b28e9a4
URL: https://github.com/llvm/llvm-project/commit/2661e995ceebd6fd083e5b62aeff21e67b28e9a4
DIFF: https://github.com/llvm/llvm-project/commit/2661e995ceebd6fd083e5b62aeff21e67b28e9a4.diff
LOG: [llvm] Ensure propagated constants in the vtable are aligned (#136630)
It's possible for virtual constant propagation in whole program
devirtualization to create unaligned loads. We originally saw this with
4-byte aligned relative vtables where we could store 8-byte values
before/after the vtable. But since the vtable is 4-byte aligned and we
unconditionally do an 8-byte load, we can't guarantee that the stored
constant will always be aligned to 8 bytes. We can also see this with
normal vtables whenever a 1-byte char is stored in the vtable because
the offset calculation for the GEP doesn't take into account the
original vtable alignment.
This patch introduces two changes to virtual constant propagation:
1. Do not propagate constants whose preferred alignment is larger than
the vtable alignment. This is required because, when a constant is
stored in the vtable, we can only guarantee that it is stored at an
address aligned to at most the vtable's alignment.
2. Round up the offset used in the GEP before the load so that the load
is always from a suitably aligned address.
This patch updates tests to reflect this alignment change and adds some
cases for relative vtables.
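To make the two changes concrete, here is a minimal standalone sketch. It is
not the in-tree WholeProgramDevirt code; alignToBits, roundedAllocOffsetBits,
and canPropagateConstant are invented names for illustration, and the rounding
helper simply mirrors llvm::alignTo. It uses the same arithmetic as the
updated findLowestOffset unit test below.

#include <cassert>
#include <cstdint>
#include <cstdio>

// Mirrors llvm::alignTo: round Value up to the next multiple of Align.
static uint64_t alignToBits(uint64_t Value, uint64_t Align) {
  return (Value + Align - 1) / Align * Align;
}

// Change 2: after finding the lowest free slot, round the bit offset up to
// the constant's size in bits so the later GEP + load is naturally aligned.
static uint64_t roundedAllocOffsetBits(uint64_t MinByte, uint64_t BitIndex,
                                       uint64_t SizeBits) {
  return alignToBits((MinByte + BitIndex) * 8, SizeBits);
}

// Change 1: only propagate if the constant's preferred alignment does not
// exceed the vtable's alignment; otherwise the stored constant could end up
// underaligned no matter which offset is chosen.
static bool canPropagateConstant(uint64_t TypeAlign, uint64_t TableAlign) {
  return TypeAlign <= TableAlign;
}

int main() {
  // A 32-bit constant whose lowest free slot starts at bit 40 now lands at
  // bit 64 (byte 8) instead of bit 40 (byte 5): alignTo(40, 32) == 64.
  assert(roundedAllocOffsetBits(5, 0, 32) == 64);
  // An i64 (8-byte preferred alignment) is not propagated around a
  // 4-byte-aligned relative vtable.
  assert(!canPropagateConstant(/*TypeAlign=*/8, /*TableAlign=*/4));
  std::printf("ok\n");
}

Under these rules the updated unit-test expectation follows directly: the old
answer of 40 bits for a 32-bit constant rounds up to 64.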
Added:
Modified:
llvm/lib/Transforms/IPO/WholeProgramDevirt.cpp
llvm/test/Transforms/WholeProgramDevirt/virtual-const-prop-begin.ll
llvm/test/Transforms/WholeProgramDevirt/virtual-const-prop-check.ll
llvm/test/Transforms/WholeProgramDevirt/virtual-const-prop-end.ll
llvm/test/Transforms/WholeProgramDevirt/virtual-const-prop-small-alignment-32.ll
llvm/test/Transforms/WholeProgramDevirt/virtual-const-prop-small-alignment-64.ll
llvm/unittests/Transforms/IPO/WholeProgramDevirt.cpp
Removed:
################################################################################
diff --git a/llvm/lib/Transforms/IPO/WholeProgramDevirt.cpp b/llvm/lib/Transforms/IPO/WholeProgramDevirt.cpp
index 49c9515fa6a0b..aa527aec622bf 100644
--- a/llvm/lib/Transforms/IPO/WholeProgramDevirt.cpp
+++ b/llvm/lib/Transforms/IPO/WholeProgramDevirt.cpp
@@ -298,7 +298,9 @@ wholeprogramdevirt::findLowestOffset(ArrayRef<VirtualCallTarget> Targets,
++Byte;
}
}
- return (MinByte + I) * 8;
+ // Rounding up ensures the constant is always stored at an address we
+ // can directly load from without misalignment.
+ return alignTo((MinByte + I) * 8, Size);
NextI:;
}
}
@@ -1834,9 +1836,19 @@ bool DevirtModule::tryVirtualConstProp(
if (!RetType)
return false;
unsigned BitWidth = RetType->getBitWidth();
+
+ // TODO: Since we can evaluate these constants at compile-time, we can save
+ // some space by calculating the smallest range of values that all these
+ // constants can fit in, then only allocate enough space to fit those values.
+ // At each callsite, we can get the original type by doing a sign/zero
+ // extension. For example, if we would store an i64, but we can see that all
+ // the values fit into an i16, then we can store an i16 before/after the
+ // vtable and at each callsite do a s/zext.
if (BitWidth > 64)
return false;
+ Align TypeAlignment = M.getDataLayout().getPrefTypeAlign(RetType);
+
// Make sure that each function is defined, does not access memory, takes at
// least one argument, does not use its first argument (which we assume is
// 'this'), and has the same return type.
@@ -1861,6 +1873,18 @@ bool DevirtModule::tryVirtualConstProp(
Fn->arg_empty() || !Fn->arg_begin()->use_empty() ||
Fn->getReturnType() != RetType)
return false;
+
+ // This only works if the integer size is at most the alignment of the
+ // vtable. If the table is underaligned, then we can't guarantee that the
+ // constant will always be aligned to the integer type alignment. For
+ // example, if the table is `align 1`, we can never guarantee that an i32
+ // stored before/after the vtable is 32-bit aligned without changing the
+ // alignment of the new global.
+ GlobalVariable *GV = Target.TM->Bits->GV;
+ Align TableAlignment = M.getDataLayout().getValueOrABITypeAlignment(
+ GV->getAlign(), GV->getValueType());
+ if (TypeAlignment > TableAlignment)
+ return false;
}
for (auto &&CSByConstantArg : SlotInfo.ConstCSInfo) {
@@ -1880,6 +1904,9 @@ bool DevirtModule::tryVirtualConstProp(
// Find an allocation offset in bits in all vtables associated with the
// type.
+ // TODO: If there would be "holes" in the vtable that were added by
+ // padding, we could place i1s there to reduce any extra padding that
+ // would be introduced by the i1s.
uint64_t AllocBefore =
findLowestOffset(TargetsForSlot, /*IsAfter=*/false, BitWidth);
uint64_t AllocAfter =
@@ -1911,6 +1938,14 @@ bool DevirtModule::tryVirtualConstProp(
setAfterReturnValues(TargetsForSlot, AllocAfter, BitWidth, OffsetByte,
OffsetBit);
+ // In an earlier check we forbade constant propagation from operating on
+ // tables whose alignment is less than the alignment needed for loading
+ // the constant. Thus, the address we take the offset from will always be
+ // aligned to at least this integer alignment. Now, we need to ensure that
+ // the offset is also aligned to this integer alignment to ensure we always
+ // have an aligned load.
+ assert(OffsetByte % TypeAlignment.value() == 0);
+
if (RemarksEnabled || AreStatisticsEnabled())
for (auto &&Target : TargetsForSlot)
Target.WasDevirt = true;
diff --git a/llvm/test/Transforms/WholeProgramDevirt/virtual-const-prop-begin.ll b/llvm/test/Transforms/WholeProgramDevirt/virtual-const-prop-begin.ll
index b6adf1b40acf7..2654b70b90e85 100644
--- a/llvm/test/Transforms/WholeProgramDevirt/virtual-const-prop-begin.ll
+++ b/llvm/test/Transforms/WholeProgramDevirt/virtual-const-prop-begin.ll
@@ -9,28 +9,28 @@ target datalayout = "e-p:64:64"
;; preserve alignment. Making them i16s allows them to stay at the beginning of
;; the vtable. There are other tests where there's a mix of constants before and
;; after the vtable but for this file we just want everything before the vtable.
-; CHECK: [[VT1DATA:@[^ ]*]] = private constant { [8 x i8], [3 x ptr], [0 x i8] } { [8 x i8] c"\00\00\00\00\00\03\00\02", [3 x ptr] [ptr @vf0i1, ptr @vf1i1, ptr @vf1i16], [0 x i8] zeroinitializer }, section "vt1sec", !type [[T8:![0-9]+]]
+; CHECK: [[VT1DATA:@[^ ]*]] = private constant { [8 x i8], [3 x ptr], [0 x i8] } { [8 x i8] c"\00\00\00\00\03\00\00\02", [3 x ptr] [ptr @vf0i1, ptr @vf1i1, ptr @vf1i16], [0 x i8] zeroinitializer }, section "vt1sec", !type [[T8:![0-9]+]]
@vt1 = constant [3 x ptr] [
ptr @vf0i1,
ptr @vf1i1,
ptr @vf1i16
], section "vt1sec", !type !0
-; CHECK: [[VT2DATA:@[^ ]*]] = private constant { [8 x i8], [3 x ptr], [0 x i8] } { [8 x i8] c"\00\00\00\00\00\04\00\01", [3 x ptr] [ptr @vf1i1, ptr @vf0i1, ptr @vf2i16], [0 x i8] zeroinitializer }, !type [[T8]]
+; CHECK: [[VT2DATA:@[^ ]*]] = private constant { [8 x i8], [3 x ptr], [0 x i8] } { [8 x i8] c"\00\00\00\00\04\00\00\01", [3 x ptr] [ptr @vf1i1, ptr @vf0i1, ptr @vf2i16], [0 x i8] zeroinitializer }, !type [[T8]]
@vt2 = constant [3 x ptr] [
ptr @vf1i1,
ptr @vf0i1,
ptr @vf2i16
], !type !0
-; CHECK: [[VT3DATA:@[^ ]*]] = private constant { [4 x i8], [3 x ptr], [0 x i8] } { [4 x i8] c"\00\05\00\02", [3 x ptr] [ptr @vf0i1, ptr @vf1i1, ptr @vf3i16], [0 x i8] zeroinitializer }, align 2, !type [[T5:![0-9]+]]
+; CHECK: [[VT3DATA:@[^ ]*]] = private constant { [4 x i8], [3 x ptr], [0 x i8] } { [4 x i8] c"\05\00\00\02", [3 x ptr] [ptr @vf0i1, ptr @vf1i1, ptr @vf3i16], [0 x i8] zeroinitializer }, align 2, !type [[T5:![0-9]+]]
@vt3 = constant [3 x ptr] [
ptr @vf0i1,
ptr @vf1i1,
ptr @vf3i16
], align 2, !type !0
-; CHECK: [[VT4DATA:@[^ ]*]] = private constant { [16 x i8], [3 x ptr], [0 x i8] } { [16 x i8] c"\00\00\00\00\00\00\00\00\00\00\00\00\00\06\00\01", [3 x ptr] [ptr @vf1i1, ptr @vf0i1, ptr @vf4i16], [0 x i8] zeroinitializer }, align 16, !type [[T16:![0-9]+]]
+; CHECK: [[VT4DATA:@[^ ]*]] = private constant { [16 x i8], [3 x ptr], [0 x i8] } { [16 x i8] c"\00\00\00\00\00\00\00\00\00\00\00\00\06\00\00\01", [3 x ptr] [ptr @vf1i1, ptr @vf0i1, ptr @vf4i16], [0 x i8] zeroinitializer }, align 16, !type [[T16:![0-9]+]]
@vt4 = constant [3 x ptr] [
ptr @vf1i1,
ptr @vf0i1,
@@ -136,7 +136,7 @@ define i16 @call3(ptr %obj) {
call void @llvm.assume(i1 %p)
%fptrptr = getelementptr [3 x ptr], ptr %vtable, i16 0, i16 2
%fptr = load ptr, ptr %fptrptr
- ; CHECK: [[VTGEP3:%[^ ]*]] = getelementptr i8, ptr %vtable, i32 -3
+ ; CHECK: [[VTGEP3:%[^ ]*]] = getelementptr i8, ptr %vtable, i32 -4
; CHECK: [[VTLOAD3:%[^ ]*]] = load i16, ptr [[VTGEP3]]
%result = call i16 %fptr(ptr %obj)
; CHECK: ret i16 [[VTLOAD3]]
diff --git a/llvm/test/Transforms/WholeProgramDevirt/virtual-const-prop-check.ll b/llvm/test/Transforms/WholeProgramDevirt/virtual-const-prop-check.ll
index 40adabbe38400..d8f5c912e9a50 100644
--- a/llvm/test/Transforms/WholeProgramDevirt/virtual-const-prop-check.ll
+++ b/llvm/test/Transforms/WholeProgramDevirt/virtual-const-prop-check.ll
@@ -37,28 +37,28 @@ target triple = "x86_64-unknown-linux-gnu"
; SKIP-ALL-NOT: devirtualized
-; CHECK: [[VT1DATA:@[^ ]*]] = private constant { [8 x i8], [3 x ptr], [0 x i8] } { [8 x i8] c"\00\00\00\01\00\00\00\02", [3 x ptr] [ptr @vf0i1, ptr @vf1i1, ptr @vf1i32], [0 x i8] zeroinitializer }, section "vt1sec", !type [[T8:![0-9]+]]
+; CHECK: [[VT1DATA:@[^ ]*]] = private constant { [8 x i8], [3 x ptr], [4 x i8] } { [8 x i8] c"\00\00\00\00\00\00\00\02", [3 x ptr] [ptr @vf0i1, ptr @vf1i1, ptr @vf1i32], [4 x i8] c"\01\00\00\00" }, section "vt1sec", !type [[T8:![0-9]+]]
@vt1 = constant [3 x ptr] [
ptr @vf0i1,
ptr @vf1i1,
ptr @vf1i32
], section "vt1sec", !type !0
-; CHECK: [[VT2DATA:@[^ ]*]] = private constant { [8 x i8], [3 x ptr], [0 x i8] } { [8 x i8] c"\00\00\00\02\00\00\00\01", [3 x ptr] [ptr @vf1i1, ptr @vf0i1, ptr @vf2i32], [0 x i8] zeroinitializer }, !type [[T8]]
+; CHECK: [[VT2DATA:@[^ ]*]] = private constant { [8 x i8], [3 x ptr], [4 x i8] } { [8 x i8] c"\00\00\00\00\00\00\00\01", [3 x ptr] [ptr @vf1i1, ptr @vf0i1, ptr @vf2i32], [4 x i8] c"\02\00\00\00" }, !type [[T8]]
@vt2 = constant [3 x ptr] [
ptr @vf1i1,
ptr @vf0i1,
ptr @vf2i32
], !type !0
-; CHECK: [[VT3DATA:@[^ ]*]] = private constant { [8 x i8], [3 x ptr], [0 x i8] } { [8 x i8] c"\00\00\00\03\00\00\00\02", [3 x ptr] [ptr @vf0i1, ptr @vf1i1, ptr @vf3i32], [0 x i8] zeroinitializer }, !type [[T8]]
+; CHECK: [[VT3DATA:@[^ ]*]] = private constant { [8 x i8], [3 x ptr], [4 x i8] } { [8 x i8] c"\00\00\00\00\00\00\00\02", [3 x ptr] [ptr @vf0i1, ptr @vf1i1, ptr @vf3i32], [4 x i8] c"\03\00\00\00" }, !type [[T8]]
@vt3 = constant [3 x ptr] [
ptr @vf0i1,
ptr @vf1i1,
ptr @vf3i32
], !type !0
-; CHECK: [[VT4DATA:@[^ ]*]] = private constant { [8 x i8], [3 x ptr], [0 x i8] } { [8 x i8] c"\00\00\00\04\00\00\00\01", [3 x ptr] [ptr @vf1i1, ptr @vf0i1, ptr @vf4i32], [0 x i8] zeroinitializer }, !type [[T8]]
+; CHECK: [[VT4DATA:@[^ ]*]] = private constant { [8 x i8], [3 x ptr], [4 x i8] } { [8 x i8] c"\00\00\00\00\00\00\00\01", [3 x ptr] [ptr @vf1i1, ptr @vf0i1, ptr @vf4i32], [4 x i8] c"\04\00\00\00" }, !type [[T8]]
@vt4 = constant [3 x ptr] [
ptr @vf1i1,
ptr @vf0i1,
@@ -95,10 +95,10 @@ i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf0i1 to i64), i64 p
i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf4i32 to i64), i64 ptrtoint (ptr @vt7_rel to i64)) to i32)
], !type !1
-; CHECK: @vt1 = alias [3 x ptr], getelementptr inbounds ({ [8 x i8], [3 x ptr], [0 x i8] }, ptr [[VT1DATA]], i32 0, i32 1)
-; CHECK: @vt2 = alias [3 x ptr], getelementptr inbounds ({ [8 x i8], [3 x ptr], [0 x i8] }, ptr [[VT2DATA]], i32 0, i32 1)
-; CHECK: @vt3 = alias [3 x ptr], getelementptr inbounds ({ [8 x i8], [3 x ptr], [0 x i8] }, ptr [[VT3DATA]], i32 0, i32 1)
-; CHECK: @vt4 = alias [3 x ptr], getelementptr inbounds ({ [8 x i8], [3 x ptr], [0 x i8] }, ptr [[VT4DATA]], i32 0, i32 1)
+; CHECK: @vt1 = alias [3 x ptr], getelementptr inbounds ({ [8 x i8], [3 x ptr], [4 x i8] }, ptr [[VT1DATA]], i32 0, i32 1)
+; CHECK: @vt2 = alias [3 x ptr], getelementptr inbounds ({ [8 x i8], [3 x ptr], [4 x i8] }, ptr [[VT2DATA]], i32 0, i32 1)
+; CHECK: @vt3 = alias [3 x ptr], getelementptr inbounds ({ [8 x i8], [3 x ptr], [4 x i8] }, ptr [[VT3DATA]], i32 0, i32 1)
+; CHECK: @vt4 = alias [3 x ptr], getelementptr inbounds ({ [8 x i8], [3 x ptr], [4 x i8] }, ptr [[VT4DATA]], i32 0, i32 1)
; CHECK: @vt6_rel = alias [3 x i32], getelementptr inbounds ({ [4 x i8], [3 x i32], [0 x i8] }, ptr [[VT6RELDATA]], i32 0, i32 1)
; CHECK: @vt7_rel = alias [3 x i32], getelementptr inbounds ({ [4 x i8], [3 x i32], [0 x i8] }, ptr [[VT7RELDATA]], i32 0, i32 1)
@@ -165,7 +165,7 @@ define i32 @call3(ptr %obj) {
%vtable = load ptr, ptr %obj
%pair = call {ptr, i1} @llvm.type.checked.load(ptr %vtable, i32 16, metadata !"typeid")
%fptr = extractvalue {ptr, i1} %pair, 0
- ; CHECK: [[VTGEP3:%[^ ]*]] = getelementptr i8, ptr %vtable, i32 -5
+ ; CHECK: [[VTGEP3:%[^ ]*]] = getelementptr i8, ptr %vtable, i32 24
; CHECK: [[VTLOAD3:%[^ ]*]] = load i32, ptr [[VTGEP3]]
%result = call i32 %fptr(ptr %obj)
; CHECK: ret i32 [[VTLOAD3]]
diff --git a/llvm/test/Transforms/WholeProgramDevirt/virtual-const-prop-end.ll b/llvm/test/Transforms/WholeProgramDevirt/virtual-const-prop-end.ll
index e0f9b6dbe2ac5..dd91ff6e6f3aa 100644
--- a/llvm/test/Transforms/WholeProgramDevirt/virtual-const-prop-end.ll
+++ b/llvm/test/Transforms/WholeProgramDevirt/virtual-const-prop-end.ll
@@ -2,7 +2,7 @@
target datalayout = "e-p:64:64"
-; CHECK: [[VT1DATA:@[^ ]*]] = private constant { [0 x i8], [4 x ptr], [5 x i8] } { [0 x i8] zeroinitializer, [4 x ptr] [ptr null, ptr @vf0i1, ptr @vf1i1, ptr @vf1i32], [5 x i8] c"\02\03\00\00\00" }, !type [[T8:![0-9]+]]
+; CHECK: [[VT1DATA:@[^ ]*]] = private constant { [0 x i8], [4 x ptr], [8 x i8] } { [0 x i8] zeroinitializer, [4 x ptr] [ptr null, ptr @vf0i1, ptr @vf1i1, ptr @vf1i32], [8 x i8] c"\02\00\00\00\03\00\00\00" }, !type [[T8:![0-9]+]]
@vt1 = constant [4 x ptr] [
ptr null,
ptr @vf0i1,
@@ -10,14 +10,14 @@ ptr @vf1i1,
ptr @vf1i32
], !type !1
-; CHECK: [[VT2DATA:@[^ ]*]] = private constant { [0 x i8], [3 x ptr], [5 x i8] } { [0 x i8] zeroinitializer, [3 x ptr] [ptr @vf1i1, ptr @vf0i1, ptr @vf2i32], [5 x i8] c"\01\04\00\00\00" }, !type [[T0:![0-9]+]]
+; CHECK: [[VT2DATA:@[^ ]*]] = private constant { [0 x i8], [3 x ptr], [8 x i8] } { [0 x i8] zeroinitializer, [3 x ptr] [ptr @vf1i1, ptr @vf0i1, ptr @vf2i32], [8 x i8] c"\01\00\00\00\04\00\00\00" }, !type [[T0:![0-9]+]]
@vt2 = constant [3 x ptr] [
ptr @vf1i1,
ptr @vf0i1,
ptr @vf2i32
], !type !0
-; CHECK: [[VT3DATA:@[^ ]*]] = private constant { [0 x i8], [4 x ptr], [5 x i8] } { [0 x i8] zeroinitializer, [4 x ptr] [ptr null, ptr @vf0i1, ptr @vf1i1, ptr @vf3i32], [5 x i8] c"\02\05\00\00\00" }, !type [[T8]]
+; CHECK: [[VT3DATA:@[^ ]*]] = private constant { [0 x i8], [4 x ptr], [8 x i8] } { [0 x i8] zeroinitializer, [4 x ptr] [ptr null, ptr @vf0i1, ptr @vf1i1, ptr @vf3i32], [8 x i8] c"\02\00\00\00\05\00\00\00" }, !type [[T8]]
@vt3 = constant [4 x ptr] [
ptr null,
ptr @vf0i1,
@@ -25,7 +25,7 @@ ptr @vf1i1,
ptr @vf3i32
], !type !1
-; CHECK: [[VT4DATA:@[^ ]*]] = private constant { [0 x i8], [3 x ptr], [5 x i8] } { [0 x i8] zeroinitializer, [3 x ptr] [ptr @vf1i1, ptr @vf0i1, ptr @vf4i32], [5 x i8] c"\01\06\00\00\00" }, !type [[T0]]
+; CHECK: [[VT4DATA:@[^ ]*]] = private constant { [0 x i8], [3 x ptr], [8 x i8] } { [0 x i8] zeroinitializer, [3 x ptr] [ptr @vf1i1, ptr @vf0i1, ptr @vf4i32], [8 x i8] c"\01\00\00\00\06\00\00\00" }, !type [[T0]]
@vt4 = constant [3 x ptr] [
ptr @vf1i1,
ptr @vf0i1,
@@ -57,10 +57,10 @@ i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf1i1 to i64), i64 p
i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf4i32 to i64), i64 ptrtoint (ptr @vt6_rel to i64)) to i32)
], !type !2
-; CHECK: @vt1 = alias [4 x ptr], getelementptr inbounds ({ [0 x i8], [4 x ptr], [5 x i8] }, ptr [[VT1DATA]], i32 0, i32 1)
-; CHECK: @vt2 = alias [3 x ptr], getelementptr inbounds ({ [0 x i8], [3 x ptr], [5 x i8] }, ptr [[VT2DATA]], i32 0, i32 1)
-; CHECK: @vt3 = alias [4 x ptr], getelementptr inbounds ({ [0 x i8], [4 x ptr], [5 x i8] }, ptr [[VT3DATA]], i32 0, i32 1)
-; CHECK: @vt4 = alias [3 x ptr], getelementptr inbounds ({ [0 x i8], [3 x ptr], [5 x i8] }, ptr [[VT4DATA]], i32 0, i32 1)
+; CHECK: @vt1 = alias [4 x ptr], getelementptr inbounds ({ [0 x i8], [4 x ptr], [8 x i8] }, ptr [[VT1DATA]], i32 0, i32 1)
+; CHECK: @vt2 = alias [3 x ptr], getelementptr inbounds ({ [0 x i8], [3 x ptr], [8 x i8] }, ptr [[VT2DATA]], i32 0, i32 1)
+; CHECK: @vt3 = alias [4 x ptr], getelementptr inbounds ({ [0 x i8], [4 x ptr], [8 x i8] }, ptr [[VT3DATA]], i32 0, i32 1)
+; CHECK: @vt4 = alias [3 x ptr], getelementptr inbounds ({ [0 x i8], [3 x ptr], [8 x i8] }, ptr [[VT4DATA]], i32 0, i32 1)
define i1 @vf0i1(ptr %this) readnone {
ret i1 0
@@ -124,7 +124,7 @@ define i32 @call3(ptr %obj) {
call void @llvm.assume(i1 %p)
%fptrptr = getelementptr [3 x ptr], ptr %vtable, i32 0, i32 2
%fptr = load ptr, ptr %fptrptr
- ; CHECK: [[VTGEP3:%[^ ]*]] = getelementptr i8, ptr %vtable, i32 25
+ ; CHECK: [[VTGEP3:%[^ ]*]] = getelementptr i8, ptr %vtable, i32 28
; CHECK: [[VTLOAD3:%[^ ]*]] = load i32, ptr [[VTGEP3]]
%result = call i32 %fptr(ptr %obj)
; CHECK: ret i32 [[VTLOAD3]]
diff --git a/llvm/test/Transforms/WholeProgramDevirt/virtual-const-prop-small-alignment-32.ll b/llvm/test/Transforms/WholeProgramDevirt/virtual-const-prop-small-alignment-32.ll
index fd703712ceb2c..ab76f2c22e343 100644
--- a/llvm/test/Transforms/WholeProgramDevirt/virtual-const-prop-small-alignment-32.ll
+++ b/llvm/test/Transforms/WholeProgramDevirt/virtual-const-prop-small-alignment-32.ll
@@ -5,8 +5,8 @@ target datalayout = "e-p:32:32"
;; Constant propagation should be agnostic towards sections.
;; Also the new global should be in the original vtable's section.
-; CHECK: [[VT1DATA:@[^ ]*]] = {{.*}} { [8 x i8], [3 x ptr], [0 x i8] }
-; CHECK-SAME: [8 x i8] c"\00\00\01\00\00\00\03\00",
+; CHECK: [[VT1DATA:@[^ ]*]] = {{.*}} { [4 x i8], [3 x ptr], [0 x i8] }
+; CHECK-SAME: [4 x i8] c"\00\00\03\00",
; CHECK-SAME: }, section "vt1sec", !type [[T8:![0-9]+]]
@vt1 = constant [3 x ptr] [
ptr @vf0i1,
@@ -23,8 +23,8 @@ ptr @vf1i32
;; according to the datalayout, this could result in an unaligned load.
;; 2. The call instruction in @call3 is replaced with a GEP + load.
;;
-; CHECK: [[VT2DATA:@[^ ]*]] = {{.*}} { [8 x i8], [3 x ptr], [0 x i8] }
-; CHECK-SAME: [8 x i8] c"\00\00\02\00\00\00\02\01",
+; CHECK: [[VT2DATA:@[^ ]*]] = {{.*}} { [4 x i8], [3 x ptr], [0 x i8] }
+; CHECK-SAME: [4 x i8] c"\00\00\02\01",
; CHECK-SAME: !type [[T8]]
@vt2 = constant [3 x ptr] [
ptr @vf1i1,
@@ -37,8 +37,8 @@ ptr @vf2i32
;; All the functions returning i8s and i1s should still be constant-propagated
;; because we can still do an aligned load regardless of where the 1-byte aligned
;; vtable is.
-; CHECK: [[VT3DATA:@[^ ]*]] = {{.*}} { [6 x i8], [3 x ptr], [0 x i8] }
-; CHECK-SAME: [6 x i8] c"\03\00\00\00\03\00",
+; CHECK: [[VT3DATA:@[^ ]*]] = {{.*}} { [2 x i8], [3 x ptr], [0 x i8] }
+; CHECK-SAME: [2 x i8] c"\03\00",
; CHECK-SAME: }, align 1, !type [[T5:![0-9]+]]
@vt3 = constant [3 x ptr] [
ptr @vf0i1,
@@ -48,7 +48,7 @@ ptr @vf3i32
;; This represents an overaligned vtable.
; CHECK: [[VT4DATA:@[^ ]*]] = {{.*}} { [16 x i8], [3 x ptr], [0 x i8] }
-; CHECK-SAME: [16 x i8] c"\00\00\00\00\00\00\00\00\00\00\04\00\00\00\02\01",
+; CHECK-SAME: [16 x i8] c"\00\00\00\00\00\00\00\00\00\00\00\00\00\00\02\01",
; CHECK-SAME: }, align 16, !type [[T16:![0-9]+]]
@vt4 = constant [3 x ptr] [
ptr @vf1i1,
@@ -57,8 +57,8 @@ ptr @vf4i32
], align 16, !type !0
;; These contain a mix of different integral type sizes.
-; CHECK: [[VT6DATA:@[^ ]*]] = {{.*}} { [12 x i8], [3 x ptr], [0 x i8] }
-; CHECK-SAME: [12 x i8] c"\00\00\00\0B\05\00\00\00\00\00\00\00",
+; CHECK: [[VT6DATA:@[^ ]*]] = {{.*}} { [4 x i8], [3 x ptr], [0 x i8] }
+; CHECK-SAME: [4 x i8] c"\00\00\00\0B",
; CHECK-SAME: }, !type [[T1:![0-9]+]]
@vt6 = constant [3 x ptr] [
ptr @vf0i1,
@@ -66,8 +66,8 @@ ptr @vf10i8,
ptr @vf5i64
], !type !1
-; CHECK: [[VT7DATA:@[^ ]*]] = {{.*}} { [12 x i8], [3 x ptr], [0 x i8] }
-; CHECK-SAME: [12 x i8] c"\00\00\00\0A\06\00\00\00\00\00\00\00",
+; CHECK: [[VT7DATA:@[^ ]*]] = {{.*}} { [4 x i8], [3 x ptr], [0 x i8] }
+; CHECK-SAME: [4 x i8] c"\00\00\00\0A",
; CHECK-SAME: }, !type [[T1]]
@vt7 = constant [3 x ptr] [
ptr @vf1i1,
@@ -76,8 +76,8 @@ ptr @vf6i64
], !type !1
;; Test relative vtables
-; CHECK: [[VT6RELDATA:@[^ ]*]] = {{.*}} { [12 x i8], [3 x i32], [0 x i8] }
-; CHECK-SAME: [12 x i8] c"\00\00\00\0B\05\00\00\00\00\00\00\00",
+; CHECK: [[VT6RELDATA:@[^ ]*]] = {{.*}} { [4 x i8], [3 x i32], [0 x i8] }
+; CHECK-SAME: [4 x i8] c"\00\00\00\0B",
; CHECK-SAME: ], [0 x i8] zeroinitializer }, !type [[TREL:![0-9]+]]
@vt6_rel = constant [3 x i32] [
i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf0i1 to i64), i64 ptrtoint (ptr @vt6_rel to i64)) to i32),
@@ -85,8 +85,8 @@ i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf10i8 to i64), i64
i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf5i64 to i64), i64 ptrtoint (ptr @vt6_rel to i64)) to i32)
], !type !2
-; CHECK: [[VT7RELDATA:@[^ ]*]] = {{.*}} { [12 x i8], [3 x i32], [0 x i8] }
-; CHECK-SAME: [12 x i8] c"\00\00\00\0A\06\00\00\00\00\00\00\00",
+; CHECK: [[VT7RELDATA:@[^ ]*]] = {{.*}} { [4 x i8], [3 x i32], [0 x i8] }
+; CHECK-SAME: [4 x i8] c"\00\00\00\0A",
; CHECK-SAME: ], [0 x i8] zeroinitializer }, !type [[TREL]]
@vt7_rel = constant [3 x i32] [
i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf1i1 to i64), i64 ptrtoint (ptr @vt7_rel to i64)) to i32),
@@ -94,14 +94,14 @@ i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf9i8 to i64), i64 p
i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf6i64 to i64), i64 ptrtoint (ptr @vt7_rel to i64)) to i32)
], !type !2
-; CHECK: @vt1 = alias [3 x ptr], getelementptr inbounds ({ [8 x i8], [3 x ptr], [0 x i8] }, ptr [[VT1DATA]], i32 0, i32 1)
-; CHECK: @vt2 = alias [3 x ptr], getelementptr inbounds ({ [8 x i8], [3 x ptr], [0 x i8] }, ptr [[VT2DATA]], i32 0, i32 1)
-; CHECK: @vt3 = alias [3 x ptr], getelementptr inbounds ({ [6 x i8], [3 x ptr], [0 x i8] }, ptr [[VT3DATA]], i32 0, i32 1)
+; CHECK: @vt1 = alias [3 x ptr], getelementptr inbounds ({ [4 x i8], [3 x ptr], [0 x i8] }, ptr [[VT1DATA]], i32 0, i32 1)
+; CHECK: @vt2 = alias [3 x ptr], getelementptr inbounds ({ [4 x i8], [3 x ptr], [0 x i8] }, ptr [[VT2DATA]], i32 0, i32 1)
+; CHECK: @vt3 = alias [3 x ptr], getelementptr inbounds ({ [2 x i8], [3 x ptr], [0 x i8] }, ptr [[VT3DATA]], i32 0, i32 1)
; CHECK: @vt4 = alias [3 x ptr], getelementptr inbounds ({ [16 x i8], [3 x ptr], [0 x i8] }, ptr [[VT4DATA]], i32 0, i32 1)
-; CHECK: @vt6 = alias [3 x ptr], getelementptr inbounds ({ [12 x i8], [3 x ptr], [0 x i8] }, ptr [[VT6DATA]], i32 0, i32 1)
-; CHECK: @vt7 = alias [3 x ptr], getelementptr inbounds ({ [12 x i8], [3 x ptr], [0 x i8] }, ptr [[VT7DATA]], i32 0, i32 1)
-; CHECK: @vt6_rel = alias [3 x i32], getelementptr inbounds ({ [12 x i8], [3 x i32], [0 x i8] }, ptr [[VT6RELDATA]], i32 0, i32 1)
-; CHECK: @vt7_rel = alias [3 x i32], getelementptr inbounds ({ [12 x i8], [3 x i32], [0 x i8] }, ptr [[VT7RELDATA]], i32 0, i32 1)
+; CHECK: @vt6 = alias [3 x ptr], getelementptr inbounds ({ [4 x i8], [3 x ptr], [0 x i8] }, ptr [[VT6DATA]], i32 0, i32 1)
+; CHECK: @vt7 = alias [3 x ptr], getelementptr inbounds ({ [4 x i8], [3 x ptr], [0 x i8] }, ptr [[VT7DATA]], i32 0, i32 1)
+; CHECK: @vt6_rel = alias [3 x i32], getelementptr inbounds ({ [4 x i8], [3 x i32], [0 x i8] }, ptr [[VT6RELDATA]], i32 0, i32 1)
+; CHECK: @vt7_rel = alias [3 x i32], getelementptr inbounds ({ [4 x i8], [3 x i32], [0 x i8] }, ptr [[VT7RELDATA]], i32 0, i32 1)
define i1 @vf0i1(ptr %this) readnone {
ret i1 0
@@ -199,9 +199,10 @@ define i32 @call3(ptr %obj) {
%fptr = load ptr, ptr %fptrptr
%result = call i32 %fptr(ptr %obj)
ret i32 %result
- ; CHECK: [[VTGEP2:%[^ ]*]] = getelementptr i8, ptr %vtable, i32 -6
- ; CHECK: [[VTLOAD:%[^ ]*]] = load i32, ptr [[VTGEP2]]
- ; CHECK: ret i32 [[VTLOAD]]
+ ; CHECK: [[FPTRPTR:%.*]] = getelementptr [3 x ptr], ptr %vtable, i32 0, i32 2
+ ; CHECK: [[FPTR:%.*]] = load ptr, ptr [[FPTRPTR]], align 4
+ ; CHECK: [[RES:%.*]] = call i32 [[FPTR]](ptr %obj)
+ ; CHECK: ret i32 [[RES]]
}
; CHECK-LABEL: define i1 @call4(
@@ -226,9 +227,10 @@ define i64 @call5(ptr %obj) {
%fptr = load ptr, ptr %fptrptr
%result = call i64 %fptr(ptr %obj)
ret i64 %result
- ; CHECK: [[VTGEP2:%[^ ]*]] = getelementptr i8, ptr %vtable, i32 -8
- ; CHECK: [[VTLOAD:%[^ ]*]] = load i64, ptr [[VTGEP2]]
- ; CHECK: ret i64 [[VTLOAD]]
+ ; CHECK: [[FPTRPTR:%.*]] = getelementptr [3 x ptr], ptr %vtable, i32 0, i32 2
+ ; CHECK: [[FPTR:%.*]] = load ptr, ptr [[FPTRPTR]], align 4
+ ; CHECK: [[RES:%.*]] = call i64 [[FPTR]](ptr %obj)
+ ; CHECK: ret i64 [[RES]]
}
; CHECK-LABEL: define i8 @call6(
@@ -240,7 +242,7 @@ define i8 @call6(ptr %obj) {
%fptr = load ptr, ptr %fptrptr
%result = call i8 %fptr(ptr %obj)
ret i8 %result
- ; CHECK: [[VTGEP2:%[^ ]*]] = getelementptr i8, ptr %vtable, i32 -9
+ ; CHECK: [[VTGEP2:%[^ ]*]] = getelementptr i8, ptr %vtable, i32 -1
; CHECK: [[VTLOAD:%[^ ]*]] = load i8, ptr [[VTGEP2]]
; CHECK: ret i8 [[VTLOAD]]
}
@@ -265,9 +267,9 @@ define i64 @call5_rel(ptr %obj) {
%fptr = call ptr @llvm.load.relative.i32(ptr %vtable, i32 8)
%result = call i64 %fptr(ptr %obj)
ret i64 %result
- ; CHECK: [[VTGEP2:%[^ ]*]] = getelementptr i8, ptr %vtable, i32 -8
- ; CHECK: [[VTLOAD:%[^ ]*]] = load i64, ptr [[VTGEP2]]
- ; CHECK: ret i64 [[VTLOAD]]
+ ; CHECK: [[FPTR:%.*]] = call ptr @llvm.load.relative.i32(ptr %vtable, i32 8)
+ ; CHECK: [[RES:%.*]] = call i64 [[FPTR]](ptr %obj)
+ ; CHECK: ret i64 [[RES]]
}
; CHECK-LABEL: define i8 @call6_rel(
@@ -278,7 +280,7 @@ define i8 @call6_rel(ptr %obj) {
%fptr = call ptr @llvm.load.relative.i32(ptr %vtable, i32 4)
%result = call i8 %fptr(ptr %obj)
ret i8 %result
- ; CHECK: [[VTGEP2:%[^ ]*]] = getelementptr i8, ptr %vtable, i32 -9
+ ; CHECK: [[VTGEP2:%[^ ]*]] = getelementptr i8, ptr %vtable, i32 -1
; CHECK: [[VTLOAD:%[^ ]*]] = load i8, ptr [[VTGEP2]]
; CHECK: ret i8 [[VTLOAD]]
}
@@ -288,11 +290,11 @@ declare void @llvm.assume(i1)
declare void @__cxa_pure_virtual()
declare ptr @llvm.load.relative.i32(ptr, i32)
-; CHECK: [[T8]] = !{i32 8, !"typeid"}
-; CHECK: [[T5]] = !{i32 6, !"typeid"}
+; CHECK: [[T8]] = !{i32 4, !"typeid"}
+; CHECK: [[T5]] = !{i32 2, !"typeid"}
; CHECK: [[T16]] = !{i32 16, !"typeid"}
-; CHECK: [[T1]] = !{i32 12, !"typeid2"}
-; CHECK: [[TREL]] = !{i32 12, !"typeid3"}
+; CHECK: [[T1]] = !{i32 4, !"typeid2"}
+; CHECK: [[TREL]] = !{i32 4, !"typeid3"}
!0 = !{i32 0, !"typeid"}
!1 = !{i32 0, !"typeid2"}
diff --git a/llvm/test/Transforms/WholeProgramDevirt/virtual-const-prop-small-alignment-64.ll b/llvm/test/Transforms/WholeProgramDevirt/virtual-const-prop-small-alignment-64.ll
index ce4a0180dfc92..c83fbc6ed5a19 100644
--- a/llvm/test/Transforms/WholeProgramDevirt/virtual-const-prop-small-alignment-64.ll
+++ b/llvm/test/Transforms/WholeProgramDevirt/virtual-const-prop-small-alignment-64.ll
@@ -6,7 +6,7 @@ target datalayout = "e-p:64:64"
;; Constant propagation should be agnostic towards sections.
;; Also the new global should be in the original vtable's section.
; CHECK: [[VT1DATA:@[^ ]*]] = {{.*}} { [8 x i8], [3 x ptr], [0 x i8] }
-; CHECK-SAME: [8 x i8] c"\00\00\01\00\00\00\03\00",
+; CHECK-SAME: [8 x i8] c"\00\00\00\00\00\00\03\00",
; CHECK-SAME: }, section "vt1sec", !type [[T8:![0-9]+]]
@vt1 = constant [3 x ptr] [
ptr @vf0i1,
@@ -24,7 +24,7 @@ ptr @vf1i32
;; 2. The call instruction in @call3 is replaced with a GEP + load.
;;
; CHECK: [[VT2DATA:@[^ ]*]] = {{.*}} { [8 x i8], [3 x ptr], [0 x i8] }
-; CHECK-SAME: [8 x i8] c"\00\00\02\00\00\00\02\01",
+; CHECK-SAME: [8 x i8] c"\00\00\00\00\00\00\02\01",
; CHECK-SAME: !type [[T8]]
@vt2 = constant [3 x ptr] [
ptr @vf1i1,
@@ -37,8 +37,8 @@ ptr @vf2i32
;; All the functions returning i8s and i1s should still be constant-propagated
;; because we can still do an aligned load regardless of where the 1-byte aligned
;; vtable is.
-; CHECK: [[VT3DATA:@[^ ]*]] = {{.*}} { [6 x i8], [3 x ptr], [0 x i8] }
-; CHECK-SAME: [6 x i8] c"\03\00\00\00\03\00",
+; CHECK: [[VT3DATA:@[^ ]*]] = {{.*}} { [2 x i8], [3 x ptr], [0 x i8] }
+; CHECK-SAME: [2 x i8] c"\03\00",
; CHECK-SAME: }, align 1, !type [[T5:![0-9]+]]
@vt3 = constant [3 x ptr] [
ptr @vf0i1,
@@ -48,7 +48,7 @@ ptr @vf3i32
;; This represents an overaligned vtable.
; CHECK: [[VT4DATA:@[^ ]*]] = {{.*}} { [16 x i8], [3 x ptr], [0 x i8] }
-; CHECK-SAME: [16 x i8] c"\00\00\00\00\00\00\00\00\00\00\04\00\00\00\02\01",
+; CHECK-SAME: [16 x i8] c"\00\00\00\00\00\00\00\00\00\00\00\00\00\00\02\01",
; CHECK-SAME: }, align 16, !type [[T16:![0-9]+]]
@vt4 = constant [3 x ptr] [
ptr @vf1i1,
@@ -79,8 +79,8 @@ ptr @vf6i64
], !type !1
;; Test relative vtables
-; CHECK: [[VT6RELDATA:@[^ ]*]] = {{.*}} { [12 x i8], [3 x i32], [0 x i8] }
-; CHECK-SAME: [12 x i8] c"\00\00\00\0B\05\00\00\00\00\00\00\00",
+; CHECK: [[VT6RELDATA:@[^ ]*]] = {{.*}} { [4 x i8], [3 x i32], [0 x i8] }
+; CHECK-SAME: [4 x i8] c"\00\00\00\0B",
; CHECK-SAME: ], [0 x i8] zeroinitializer }, !type [[TREL:![0-9]+]]
@vt6_rel = constant [3 x i32] [
i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf0i1 to i64), i64 ptrtoint (ptr @vt6_rel to i64)) to i32),
@@ -88,8 +88,8 @@ i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf10i8 to i64), i64
i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf5i64 to i64), i64 ptrtoint (ptr @vt6_rel to i64)) to i32)
], !type !2
-; CHECK: [[VT7RELDATA:@[^ ]*]] = {{.*}} { [12 x i8], [3 x i32], [0 x i8] }
-; CHECK-SAME: [12 x i8] c"\00\00\00\0A\06\00\00\00\00\00\00\00",
+; CHECK: [[VT7RELDATA:@[^ ]*]] = {{.*}} { [4 x i8], [3 x i32], [0 x i8] }
+; CHECK-SAME: [4 x i8] c"\00\00\00\0A",
; CHECK-SAME: ], [0 x i8] zeroinitializer }, !type [[TREL]]
@vt7_rel = constant [3 x i32] [
i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf1i1 to i64), i64 ptrtoint (ptr @vt7_rel to i64)) to i32),
@@ -99,12 +99,12 @@ i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf6i64 to i64), i64
; CHECK: @vt1 = alias [3 x ptr], getelementptr inbounds ({ [8 x i8], [3 x ptr], [0 x i8] }, ptr [[VT1DATA]], i32 0, i32 1)
; CHECK: @vt2 = alias [3 x ptr], getelementptr inbounds ({ [8 x i8], [3 x ptr], [0 x i8] }, ptr [[VT2DATA]], i32 0, i32 1)
-; CHECK: @vt3 = alias [3 x ptr], getelementptr inbounds ({ [6 x i8], [3 x ptr], [0 x i8] }, ptr [[VT3DATA]], i32 0, i32 1)
+; CHECK: @vt3 = alias [3 x ptr], getelementptr inbounds ({ [2 x i8], [3 x ptr], [0 x i8] }, ptr [[VT3DATA]], i32 0, i32 1)
; CHECK: @vt4 = alias [3 x ptr], getelementptr inbounds ({ [16 x i8], [3 x ptr], [0 x i8] }, ptr [[VT4DATA]], i32 0, i32 1)
; CHECK: @vt6 = alias [3 x ptr], getelementptr inbounds ({ [16 x i8], [3 x ptr], [0 x i8] }, ptr [[VT6DATA]], i32 0, i32 1)
; CHECK: @vt7 = alias [3 x ptr], getelementptr inbounds ({ [16 x i8], [3 x ptr], [0 x i8] }, ptr [[VT7DATA]], i32 0, i32 1)
-; CHECK: @vt6_rel = alias [3 x i32], getelementptr inbounds ({ [12 x i8], [3 x i32], [0 x i8] }, ptr [[VT6RELDATA]], i32 0, i32 1)
-; CHECK: @vt7_rel = alias [3 x i32], getelementptr inbounds ({ [12 x i8], [3 x i32], [0 x i8] }, ptr [[VT7RELDATA]], i32 0, i32 1)
+; CHECK: @vt6_rel = alias [3 x i32], getelementptr inbounds ({ [4 x i8], [3 x i32], [0 x i8] }, ptr [[VT6RELDATA]], i32 0, i32 1)
+; CHECK: @vt7_rel = alias [3 x i32], getelementptr inbounds ({ [4 x i8], [3 x i32], [0 x i8] }, ptr [[VT7RELDATA]], i32 0, i32 1)
define i1 @vf0i1(ptr %this) readnone {
ret i1 0
@@ -200,9 +200,10 @@ define i32 @call3(ptr %obj) {
%fptr = load ptr, ptr %fptrptr
%result = call i32 %fptr(ptr %obj)
ret i32 %result
- ; CHECK: [[VTGEP2:%[^ ]*]] = getelementptr i8, ptr %vtable, i32 -6
- ; CHECK: [[VTLOAD:%[^ ]*]] = load i32, ptr [[VTGEP2]]
- ; CHECK: ret i32 [[VTLOAD]]
+ ; CHECK: [[FPTRPTR:%.*]] = getelementptr [3 x ptr], ptr %vtable, i32 0, i32 2
+ ; CHECK: [[FPTR:%.*]] = load ptr, ptr [[FPTRPTR]], align 8
+ ; CHECK: [[RES:%.*]] = call i32 [[FPTR]](ptr %obj)
+ ; CHECK: ret i32 [[RES]]
}
; CHECK-LABEL: define i1 @call4(
@@ -266,9 +267,9 @@ define i64 @call5_rel(ptr %obj) {
%fptr = call ptr @llvm.load.relative.i32(ptr %vtable, i32 8)
%result = call i64 %fptr(ptr %obj)
ret i64 %result
- ; CHECK: [[VTGEP2:%[^ ]*]] = getelementptr i8, ptr %vtable, i32 -8
- ; CHECK: [[VTLOAD:%[^ ]*]] = load i64, ptr [[VTGEP2]]
- ; CHECK: ret i64 [[VTLOAD]]
+ ; CHECK: [[FPTR:%.*]] = call ptr @llvm.load.relative.i32(ptr %vtable, i32 8)
+ ; CHECK: [[RES:%.*]] = call i64 [[FPTR]](ptr %obj)
+ ; CHECK: ret i64 [[RES]]
}
; CHECK-LABEL: define i8 @call6_rel(
@@ -279,7 +280,7 @@ define i8 @call6_rel(ptr %obj) {
%fptr = call ptr @llvm.load.relative.i32(ptr %vtable, i32 4)
%result = call i8 %fptr(ptr %obj)
ret i8 %result
- ; CHECK: [[VTGEP2:%[^ ]*]] = getelementptr i8, ptr %vtable, i32 -9
+ ; CHECK: [[VTGEP2:%[^ ]*]] = getelementptr i8, ptr %vtable, i32 -1
; CHECK: [[VTLOAD:%[^ ]*]] = load i8, ptr [[VTGEP2]]
; CHECK: ret i8 [[VTLOAD]]
}
@@ -290,10 +291,10 @@ declare void @__cxa_pure_virtual()
declare ptr @llvm.load.relative.i32(ptr, i32)
; CHECK: [[T8]] = !{i32 8, !"typeid"}
-; CHECK: [[T5]] = !{i32 6, !"typeid"}
+; CHECK: [[T5]] = !{i32 2, !"typeid"}
; CHECK: [[T16]] = !{i32 16, !"typeid"}
; CHECK: [[T1]] = !{i32 16, !"typeid2"}
-; CHECK: [[TREL]] = !{i32 12, !"typeid3"}
+; CHECK: [[TREL]] = !{i32 4, !"typeid3"}
!0 = !{i32 0, !"typeid"}
!1 = !{i32 0, !"typeid2"}
diff --git a/llvm/unittests/Transforms/IPO/WholeProgramDevirt.cpp b/llvm/unittests/Transforms/IPO/WholeProgramDevirt.cpp
index 8fae05bef2ae1..59cc5bc2c4e6f 100644
--- a/llvm/unittests/Transforms/IPO/WholeProgramDevirt.cpp
+++ b/llvm/unittests/Transforms/IPO/WholeProgramDevirt.cpp
@@ -55,7 +55,7 @@ TEST(WholeProgramDevirt, findLowestOffset) {
VT1.After.BytesUsed = {0xff, 0, 0, 0, 0xff};
VT2.After.BytesUsed = {0xff, 1, 0, 0, 0};
EXPECT_EQ(16ull, findLowestOffset(Targets, /*IsAfter=*/true, 16));
- EXPECT_EQ(40ull, findLowestOffset(Targets, /*IsAfter=*/true, 32));
+ EXPECT_EQ(64ull, findLowestOffset(Targets, /*IsAfter=*/true, 32));
}
TEST(WholeProgramDevirt, setReturnValues) {