[llvm] [NFC][WPD] Add constant propagation tests checking relative vtables (PR #138989)
via llvm-commits
llvm-commits at lists.llvm.org
Wed May 7 16:31:31 PDT 2025
https://github.com/PiJoules created https://github.com/llvm/llvm-project/pull/138989
This is a patch with precommitted tests to make
https://github.com/llvm/llvm-project/pull/136630 easier to review. The `virtual-const-prop-small-alignment-*` tests check the output when the loaded int alignment is less than the vtable alignment.
This also changes some constants to make it easier to differentiate between propagated values in vtables.
From 0a8b2cf472e7a2147bac6ca84fcc67e2bd3d060e Mon Sep 17 00:00:00 2001
From: Leonard Chan <leonardchan at google.com>
Date: Wed, 7 May 2025 23:23:40 +0000
Subject: [PATCH] [NFC][WPD] Add constant propagation tests to account for
relative vtables
This is a patch with precommitted tests to make
https://github.com/llvm/llvm-project/pull/136630 easier to review. The
`virtual-const-prop-small-alignment-*` tests check the output when the
loaded int alignment is less than the vtable alignment.
This also changes some constants to make it easier to differentiate
between propagated values in vtables.
---
.../virtual-const-prop-begin.ll | 125 ++++++--
.../virtual-const-prop-check.ll | 73 ++++-
.../virtual-const-prop-end.ll | 83 ++++-
.../virtual-const-prop-small-alignment-32.ll | 284 +++++++++++++++++
.../virtual-const-prop-small-alignment-64.ll | 285 ++++++++++++++++++
5 files changed, 810 insertions(+), 40 deletions(-)
create mode 100644 llvm/test/Transforms/WholeProgramDevirt/virtual-const-prop-small-alignment-32.ll
create mode 100644 llvm/test/Transforms/WholeProgramDevirt/virtual-const-prop-small-alignment-64.ll
diff --git a/llvm/test/Transforms/WholeProgramDevirt/virtual-const-prop-begin.ll b/llvm/test/Transforms/WholeProgramDevirt/virtual-const-prop-begin.ll
index 11273804e2081..2163d440e9a33 100644
--- a/llvm/test/Transforms/WholeProgramDevirt/virtual-const-prop-begin.ll
+++ b/llvm/test/Transforms/WholeProgramDevirt/virtual-const-prop-begin.ll
@@ -1,34 +1,35 @@
; RUN: opt -S -passes=wholeprogramdevirt -whole-program-visibility %s | FileCheck %s
target datalayout = "e-p:64:64"
-target triple = "x86_64-unknown-linux-gnu"
-; CHECK: [[VT1DATA:@[^ ]*]] = private constant { [8 x i8], [3 x ptr], [0 x i8] } { [8 x i8] c"\00\00\00\01\00\00\00\02", [3 x ptr] [ptr @vf0i1, ptr @vf1i1, ptr @vf1i32], [0 x i8] zeroinitializer }, section "vt1sec", !type [[T8:![0-9]+]]
+;; Note that i16 is used here such that we can ensure all constants for "typeid"
+;; can come before the vtable.
+; CHECK: [[VT1DATA:@[^ ]*]] = private constant { [8 x i8], [3 x ptr], [0 x i8] } { [8 x i8] c"\00\00\00\00\00\03\00\02", [3 x ptr] [ptr @vf0i1, ptr @vf1i1, ptr @vf1i16], [0 x i8] zeroinitializer }, section "vt1sec", !type [[T8:![0-9]+]]
@vt1 = constant [3 x ptr] [
ptr @vf0i1,
ptr @vf1i1,
-ptr @vf1i32
+ptr @vf1i16
], section "vt1sec", !type !0
-; CHECK: [[VT2DATA:@[^ ]*]] = private constant { [8 x i8], [3 x ptr], [0 x i8] } { [8 x i8] c"\00\00\00\02\00\00\00\01", [3 x ptr] [ptr @vf1i1, ptr @vf0i1, ptr @vf2i32], [0 x i8] zeroinitializer }, !type [[T8]]
+; CHECK: [[VT2DATA:@[^ ]*]] = private constant { [8 x i8], [3 x ptr], [0 x i8] } { [8 x i8] c"\00\00\00\00\00\04\00\01", [3 x ptr] [ptr @vf1i1, ptr @vf0i1, ptr @vf2i16], [0 x i8] zeroinitializer }, !type [[T8]]
@vt2 = constant [3 x ptr] [
ptr @vf1i1,
ptr @vf0i1,
-ptr @vf2i32
+ptr @vf2i16
], !type !0
-; CHECK: [[VT3DATA:@[^ ]*]] = private constant { [5 x i8], [3 x ptr], [0 x i8] } { [5 x i8] c"\03\00\00\00\02", [3 x ptr] [ptr @vf0i1, ptr @vf1i1, ptr @vf3i32], [0 x i8] zeroinitializer }, align 1, !type [[T5:![0-9]+]]
+; CHECK: [[VT3DATA:@[^ ]*]] = private constant { [4 x i8], [3 x ptr], [0 x i8] } { [4 x i8] c"\00\05\00\02", [3 x ptr] [ptr @vf0i1, ptr @vf1i1, ptr @vf3i16], [0 x i8] zeroinitializer }, align 2, !type [[T5:![0-9]+]]
@vt3 = constant [3 x ptr] [
ptr @vf0i1,
ptr @vf1i1,
-ptr @vf3i32
-], align 1, !type !0
+ptr @vf3i16
+], align 2, !type !0
-; CHECK: [[VT4DATA:@[^ ]*]] = private constant { [16 x i8], [3 x ptr], [0 x i8] } { [16 x i8] c"\00\00\00\00\00\00\00\00\00\00\00\04\00\00\00\01", [3 x ptr] [ptr @vf1i1, ptr @vf0i1, ptr @vf4i32], [0 x i8] zeroinitializer }, align 16, !type [[T16:![0-9]+]]
+; CHECK: [[VT4DATA:@[^ ]*]] = private constant { [16 x i8], [3 x ptr], [0 x i8] } { [16 x i8] c"\00\00\00\00\00\00\00\00\00\00\00\00\00\06\00\01", [3 x ptr] [ptr @vf1i1, ptr @vf0i1, ptr @vf4i16], [0 x i8] zeroinitializer }, align 16, !type [[T16:![0-9]+]]
@vt4 = constant [3 x ptr] [
ptr @vf1i1,
ptr @vf0i1,
-ptr @vf4i32
+ptr @vf4i16
], align 16, !type !0
; CHECK: @vt5 = {{.*}}, !type [[T0:![0-9]+]]
@@ -38,10 +39,35 @@ ptr @__cxa_pure_virtual,
ptr @__cxa_pure_virtual
], !type !0
+;; Test relative vtables
+; CHECK: [[VT6RELDATA:@[^ ]*]] = private constant { [4 x i8], [3 x i32], [0 x i8] } { [4 x i8] c"\00\00\03\00", [3 x i32] [
+; CHECK-SAME: i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf0i1 to i64), i64 ptrtoint (ptr @vt6_rel to i64)) to i32),
+; CHECK-SAME: i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf1i1 to i64), i64 ptrtoint (ptr @vt6_rel to i64)) to i32),
+; CHECK-SAME: i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf1i16 to i64), i64 ptrtoint (ptr @vt6_rel to i64)) to i32)
+; CHECK-SAME: ], [0 x i8] zeroinitializer }, !type [[TREL:![0-9]+]]
+@vt6_rel = constant [3 x i32] [
+i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf0i1 to i64), i64 ptrtoint (ptr @vt6_rel to i64)) to i32),
+i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf1i1 to i64), i64 ptrtoint (ptr @vt6_rel to i64)) to i32),
+i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf1i16 to i64), i64 ptrtoint (ptr @vt6_rel to i64)) to i32)
+], !type !2
+
+; CHECK: [[VT7RELDATA:@[^ ]*]] = private constant { [4 x i8], [3 x i32], [0 x i8] } { [4 x i8] c"\00\00\04\00", [3 x i32] [
+; CHECK-SAME: i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf1i1 to i64), i64 ptrtoint (ptr @vt7_rel to i64)) to i32),
+; CHECK-SAME: i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf0i1 to i64), i64 ptrtoint (ptr @vt7_rel to i64)) to i32),
+; CHECK-SAME: i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf2i16 to i64), i64 ptrtoint (ptr @vt7_rel to i64)) to i32)
+; CHECK-SAME: ], [0 x i8] zeroinitializer }, !type [[TREL]]
+@vt7_rel = constant [3 x i32] [
+i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf1i1 to i64), i64 ptrtoint (ptr @vt7_rel to i64)) to i32),
+i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf0i1 to i64), i64 ptrtoint (ptr @vt7_rel to i64)) to i32),
+i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf2i16 to i64), i64 ptrtoint (ptr @vt7_rel to i64)) to i32)
+], !type !2
+
; CHECK: @vt1 = alias [3 x ptr], getelementptr inbounds ({ [8 x i8], [3 x ptr], [0 x i8] }, ptr [[VT1DATA]], i32 0, i32 1)
; CHECK: @vt2 = alias [3 x ptr], getelementptr inbounds ({ [8 x i8], [3 x ptr], [0 x i8] }, ptr [[VT2DATA]], i32 0, i32 1)
-; CHECK: @vt3 = alias [3 x ptr], getelementptr inbounds ({ [5 x i8], [3 x ptr], [0 x i8] }, ptr [[VT3DATA]], i32 0, i32 1)
+; CHECK: @vt3 = alias [3 x ptr], getelementptr inbounds ({ [4 x i8], [3 x ptr], [0 x i8] }, ptr [[VT3DATA]], i32 0, i32 1)
; CHECK: @vt4 = alias [3 x ptr], getelementptr inbounds ({ [16 x i8], [3 x ptr], [0 x i8] }, ptr [[VT4DATA]], i32 0, i32 1)
+; CHECK: @vt6_rel = alias [3 x i32], getelementptr inbounds ({ [4 x i8], [3 x i32], [0 x i8] }, ptr [[VT6RELDATA]], i32 0, i32 1)
+; CHECK: @vt7_rel = alias [3 x i32], getelementptr inbounds ({ [4 x i8], [3 x i32], [0 x i8] }, ptr [[VT7RELDATA]], i32 0, i32 1)
define i1 @vf0i1(ptr %this) readnone {
ret i1 0
@@ -51,20 +77,20 @@ define i1 @vf1i1(ptr %this) readnone {
ret i1 1
}
-define i32 @vf1i32(ptr %this) readnone {
- ret i32 1
+define i16 @vf1i16(ptr %this) readnone {
+ ret i16 3
}
-define i32 @vf2i32(ptr %this) readnone {
- ret i32 2
+define i16 @vf2i16(ptr %this) readnone {
+ ret i16 4
}
-define i32 @vf3i32(ptr %this) readnone {
- ret i32 3
+define i16 @vf3i16(ptr %this) readnone {
+ ret i16 5
}
-define i32 @vf4i32(ptr %this) readnone {
- ret i32 4
+define i16 @vf4i16(ptr %this) readnone {
+ ret i16 6
}
; CHECK: define i1 @call1(
@@ -87,7 +113,7 @@ define i1 @call2(ptr %obj) {
%vtable = load ptr, ptr %obj
%p = call i1 @llvm.type.test(ptr %vtable, metadata !"typeid")
call void @llvm.assume(i1 %p)
- %fptrptr = getelementptr [3 x ptr], ptr %vtable, i32 0, i32 1
+ %fptrptr = getelementptr [3 x ptr], ptr %vtable, i16 0, i16 1
%fptr = load ptr, ptr %fptrptr
; CHECK: [[VTGEP2:%[^ ]*]] = getelementptr i8, ptr %vtable, i32 -1
; CHECK: [[VTLOAD2:%[^ ]*]] = load i8, ptr [[VTGEP2]]
@@ -98,27 +124,68 @@ define i1 @call2(ptr %obj) {
ret i1 %result
}
-; CHECK: define i32 @call3(
-define i32 @call3(ptr %obj) {
+; CHECK: define i16 @call3(
+define i16 @call3(ptr %obj) {
%vtable = load ptr, ptr %obj
%p = call i1 @llvm.type.test(ptr %vtable, metadata !"typeid")
call void @llvm.assume(i1 %p)
- %fptrptr = getelementptr [3 x ptr], ptr %vtable, i32 0, i32 2
+ %fptrptr = getelementptr [3 x ptr], ptr %vtable, i16 0, i16 2
%fptr = load ptr, ptr %fptrptr
- ; CHECK: [[VTGEP3:%[^ ]*]] = getelementptr i8, ptr %vtable, i32 -5
- ; CHECK: [[VTLOAD3:%[^ ]*]] = load i32, ptr [[VTGEP3]]
- %result = call i32 %fptr(ptr %obj)
- ; CHECK: ret i32 [[VTLOAD3]]
- ret i32 %result
+ ; CHECK: [[VTGEP3:%[^ ]*]] = getelementptr i8, ptr %vtable, i32 -3
+ ; CHECK: [[VTLOAD3:%[^ ]*]] = load i16, ptr [[VTGEP3]]
+ %result = call i16 %fptr(ptr %obj)
+ ; CHECK: ret i16 [[VTLOAD3]]
+ ret i16 %result
+}
+
+; CHECK: define i1 @call1_rel(
+define i1 @call1_rel(ptr %obj) {
+ %vtable = load ptr, ptr %obj
+ %p = call i1 @llvm.type.test(ptr %vtable, metadata !"typeid3")
+ call void @llvm.assume(i1 %p)
+ %fptr = call ptr @llvm.load.relative.i32(ptr %vtable, i32 0)
+ %result = call i1 %fptr(ptr %obj)
+ ret i1 %result
+ ; CHECK: [[RES:%[^ ]*]] = icmp eq ptr %vtable, @vt7_rel
+ ; CHECK: ret i1 [[RES]]
+}
+
+; CHECK: define i1 @call2_rel(
+define i1 @call2_rel(ptr %obj) {
+ %vtable = load ptr, ptr %obj
+ %p = call i1 @llvm.type.test(ptr %vtable, metadata !"typeid3")
+ call void @llvm.assume(i1 %p)
+ %fptr = call ptr @llvm.load.relative.i32(ptr %vtable, i32 4)
+ %result = call i1 %fptr(ptr %obj)
+ ret i1 %result
+ ; CHECK: [[RES:%[^ ]*]] = icmp eq ptr %vtable, @vt6_rel
+ ; CHECK: ret i1 [[RES]]
+}
+
+; CHECK: define i16 @call3_rel(
+define i16 @call3_rel(ptr %obj) {
+ %vtable = load ptr, ptr %obj
+ %p = call i1 @llvm.type.test(ptr %vtable, metadata !"typeid3")
+ call void @llvm.assume(i1 %p)
+ %fptr = call ptr @llvm.load.relative.i32(ptr %vtable, i32 8)
+ ; CHECK: [[VTGEP3:%[^ ]*]] = getelementptr i8, ptr %vtable, i32 -2
+ ; CHECK: [[VTLOAD3:%[^ ]*]] = load i16, ptr [[VTGEP3]]
+ %result = call i16 %fptr(ptr %obj)
+ ; CHECK: ret i16 [[VTLOAD3]]
+ ret i16 %result
}
declare i1 @llvm.type.test(ptr, metadata)
declare void @llvm.assume(i1)
declare void @__cxa_pure_virtual()
+declare ptr @llvm.load.relative.i32(ptr, i32)
; CHECK: [[T8]] = !{i32 8, !"typeid"}
-; CHECK: [[T5]] = !{i32 5, !"typeid"}
+; CHECK: [[T5]] = !{i32 4, !"typeid"}
; CHECK: [[T16]] = !{i32 16, !"typeid"}
; CHECK: [[T0]] = !{i32 0, !"typeid"}
+; CHECK: [[TREL]] = !{i32 4, !"typeid3"}
!0 = !{i32 0, !"typeid"}
+!1 = !{i32 0, !"typeid2"}
+!2 = !{i32 0, !"typeid3"}
diff --git a/llvm/test/Transforms/WholeProgramDevirt/virtual-const-prop-check.ll b/llvm/test/Transforms/WholeProgramDevirt/virtual-const-prop-check.ll
index fb8b6de003fdd..40adabbe38400 100644
--- a/llvm/test/Transforms/WholeProgramDevirt/virtual-const-prop-check.ll
+++ b/llvm/test/Transforms/WholeProgramDevirt/virtual-const-prop-check.ll
@@ -7,13 +7,16 @@
; RUN: opt -S -passes=wholeprogramdevirt -whole-program-visibility -pass-remarks=wholeprogramdevirt -wholeprogramdevirt-skip=vf0i1 %s 2>&1 | FileCheck %s --check-prefix=SKIP
; We have two set of call targets {vf0i1, vf1i1} and {vf1i32, vf2i32, vf3i32, vf4i32}.
; The command below prevents both of them from devirtualization.
-; RUN: opt -S -passes=wholeprogramdevirt -whole-program-visibility -pass-remarks=wholeprogramdevirt -wholeprogramdevirt-skip=vf0i1,vf1i32 %s 2>&1 | FileCheck %s --check-prefix=SKIP-ALL
+; RUN: opt -S -passes=wholeprogramdevirt -whole-program-visibility -pass-remarks=wholeprogramdevirt -wholeprogramdevirt-skip=vf0i1,vf1i32,vf3i32 %s 2>&1 | FileCheck %s --check-prefix=SKIP-ALL
; Check wildcard
; RUN: opt -S -passes=wholeprogramdevirt -whole-program-visibility -pass-remarks=wholeprogramdevirt -wholeprogramdevirt-skip=vf?i1 %s 2>&1 | FileCheck %s --check-prefix=SKIP
target datalayout = "e-p:64:64"
target triple = "x86_64-unknown-linux-gnu"
+; CHECK: remark: <unknown>:0:0: unique-ret-val: devirtualized a call to vf0i1
+; CHECK: remark: <unknown>:0:0: unique-ret-val: devirtualized a call to vf1i1
+; CHECK: remark: <unknown>:0:0: virtual-const-prop: devirtualized a call to vf3i32
; CHECK: remark: <unknown>:0:0: virtual-const-prop-1-bit: devirtualized a call to vf0i1
; CHECK: remark: <unknown>:0:0: virtual-const-prop-1-bit: devirtualized a call to vf1i1
; CHECK: remark: <unknown>:0:0: virtual-const-prop: devirtualized a call to vf1i32
@@ -69,10 +72,35 @@ ptr @__cxa_pure_virtual,
ptr @__cxa_pure_virtual
], !type !0
+;; Test relative vtables
+; CHECK: [[VT6RELDATA:@[^ ]*]] = private constant { [4 x i8], [3 x i32], [0 x i8] } { [4 x i8] c"\03\00\00\00", [3 x i32] [
+; CHECK-SAME: i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf0i1 to i64), i64 ptrtoint (ptr @vt6_rel to i64)) to i32),
+; CHECK-SAME: i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf1i1 to i64), i64 ptrtoint (ptr @vt6_rel to i64)) to i32),
+; CHECK-SAME: i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf3i32 to i64), i64 ptrtoint (ptr @vt6_rel to i64)) to i32)
+; CHECK-SAME: ], [0 x i8] zeroinitializer }, !type [[TREL:![0-9]+]]
+@vt6_rel = constant [3 x i32] [
+i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf0i1 to i64), i64 ptrtoint (ptr @vt6_rel to i64)) to i32),
+i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf1i1 to i64), i64 ptrtoint (ptr @vt6_rel to i64)) to i32),
+i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf3i32 to i64), i64 ptrtoint (ptr @vt6_rel to i64)) to i32)
+], !type !1
+
+; CHECK: [[VT7RELDATA:@[^ ]*]] = private constant { [4 x i8], [3 x i32], [0 x i8] } { [4 x i8] c"\04\00\00\00", [3 x i32] [
+; CHECK-SAME: i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf1i1 to i64), i64 ptrtoint (ptr @vt7_rel to i64)) to i32),
+; CHECK-SAME: i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf0i1 to i64), i64 ptrtoint (ptr @vt7_rel to i64)) to i32),
+; CHECK-SAME: i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf4i32 to i64), i64 ptrtoint (ptr @vt7_rel to i64)) to i32)
+; CHECK-SAME: ], [0 x i8] zeroinitializer }, !type [[TREL]]
+@vt7_rel = constant [3 x i32] [
+i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf1i1 to i64), i64 ptrtoint (ptr @vt7_rel to i64)) to i32),
+i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf0i1 to i64), i64 ptrtoint (ptr @vt7_rel to i64)) to i32),
+i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf4i32 to i64), i64 ptrtoint (ptr @vt7_rel to i64)) to i32)
+], !type !1
+
; CHECK: @vt1 = alias [3 x ptr], getelementptr inbounds ({ [8 x i8], [3 x ptr], [0 x i8] }, ptr [[VT1DATA]], i32 0, i32 1)
; CHECK: @vt2 = alias [3 x ptr], getelementptr inbounds ({ [8 x i8], [3 x ptr], [0 x i8] }, ptr [[VT2DATA]], i32 0, i32 1)
; CHECK: @vt3 = alias [3 x ptr], getelementptr inbounds ({ [8 x i8], [3 x ptr], [0 x i8] }, ptr [[VT3DATA]], i32 0, i32 1)
; CHECK: @vt4 = alias [3 x ptr], getelementptr inbounds ({ [8 x i8], [3 x ptr], [0 x i8] }, ptr [[VT4DATA]], i32 0, i32 1)
+; CHECK: @vt6_rel = alias [3 x i32], getelementptr inbounds ({ [4 x i8], [3 x i32], [0 x i8] }, ptr [[VT6RELDATA]], i32 0, i32 1)
+; CHECK: @vt7_rel = alias [3 x i32], getelementptr inbounds ({ [4 x i8], [3 x i32], [0 x i8] }, ptr [[VT7RELDATA]], i32 0, i32 1)
define i1 @vf0i1(ptr %this) readnone {
ret i1 0
@@ -144,15 +172,56 @@ define i32 @call3(ptr %obj) {
ret i32 %result
}
+; CHECK: define i1 @call1_rel(
+define i1 @call1_rel(ptr %obj) {
+ %vtable = load ptr, ptr %obj
+ %p = call i1 @llvm.type.test(ptr %vtable, metadata !"typeid2")
+ call void @llvm.assume(i1 %p)
+ %fptr = call ptr @llvm.load.relative.i32(ptr %vtable, i32 0)
+ %result = call i1 %fptr(ptr %obj)
+ ret i1 %result
+ ; CHECK: [[RES:%[^ ]*]] = icmp eq ptr %vtable, @vt7_rel
+ ; CHECK: ret i1 [[RES]]
+}
+
+; CHECK: define i1 @call2_rel(
+define i1 @call2_rel(ptr %obj) {
+ %vtable = load ptr, ptr %obj
+ %p = call i1 @llvm.type.test(ptr %vtable, metadata !"typeid2")
+ call void @llvm.assume(i1 %p)
+ %fptr = call ptr @llvm.load.relative.i32(ptr %vtable, i32 4)
+ %result = call i1 %fptr(ptr %obj)
+ ret i1 %result
+ ; CHECK: [[RES:%[^ ]*]] = icmp eq ptr %vtable, @vt6_rel
+ ; CHECK: ret i1 [[RES]]
+}
+
+; CHECK: define i32 @call3_rel(
+define i32 @call3_rel(ptr %obj) {
+ %vtable = load ptr, ptr %obj
+ %p = call i1 @llvm.type.test(ptr %vtable, metadata !"typeid2")
+ call void @llvm.assume(i1 %p)
+ %fptr = call ptr @llvm.load.relative.i32(ptr %vtable, i32 8)
+ ; CHECK: [[VTGEP3:%[^ ]*]] = getelementptr i8, ptr %vtable, i32 -4
+ ; CHECK: [[VTLOAD3:%[^ ]*]] = load i32, ptr [[VTGEP3]]
+ %result = call i32 %fptr(ptr %obj)
+ ; CHECK: ret i32 [[VTLOAD3]]
+ ret i32 %result
+}
+
declare {ptr, i1} @llvm.type.checked.load(ptr, i32, metadata)
declare void @llvm.assume(i1)
declare void @__cxa_pure_virtual()
+declare ptr @llvm.load.relative.i32(ptr, i32)
; CHECK: [[T8]] = !{i32 8, !"typeid"}
; CHECK: [[T0]] = !{i32 0, !"typeid"}
+; CHECK: [[TREL]] = !{i32 4, !"typeid2"}
!0 = !{i32 0, !"typeid"}
+!1 = !{i32 0, !"typeid2"}
; CHECK: 6 wholeprogramdevirt - Number of whole program devirtualization targets
-; CHECK: 1 wholeprogramdevirt - Number of virtual constant propagations
+; CHECK: 2 wholeprogramdevirt - Number of unique return value optimizations
+; CHECK: 2 wholeprogramdevirt - Number of virtual constant propagations
; CHECK: 2 wholeprogramdevirt - Number of 1 bit virtual constant propagations
diff --git a/llvm/test/Transforms/WholeProgramDevirt/virtual-const-prop-end.ll b/llvm/test/Transforms/WholeProgramDevirt/virtual-const-prop-end.ll
index a28c27079f8de..e0f9b6dbe2ac5 100644
--- a/llvm/test/Transforms/WholeProgramDevirt/virtual-const-prop-end.ll
+++ b/llvm/test/Transforms/WholeProgramDevirt/virtual-const-prop-end.ll
@@ -1,9 +1,8 @@
; RUN: opt -S -passes=wholeprogramdevirt -whole-program-visibility %s | FileCheck %s
target datalayout = "e-p:64:64"
-target triple = "x86_64-unknown-linux-gnu"
-; CHECK: [[VT1DATA:@[^ ]*]] = private constant { [0 x i8], [4 x ptr], [5 x i8] } { [0 x i8] zeroinitializer, [4 x ptr] [ptr null, ptr @vf0i1, ptr @vf1i1, ptr @vf1i32], [5 x i8] c"\02\01\00\00\00" }, !type [[T8:![0-9]+]]
+; CHECK: [[VT1DATA:@[^ ]*]] = private constant { [0 x i8], [4 x ptr], [5 x i8] } { [0 x i8] zeroinitializer, [4 x ptr] [ptr null, ptr @vf0i1, ptr @vf1i1, ptr @vf1i32], [5 x i8] c"\02\03\00\00\00" }, !type [[T8:![0-9]+]]
@vt1 = constant [4 x ptr] [
ptr null,
ptr @vf0i1,
@@ -11,14 +10,14 @@ ptr @vf1i1,
ptr @vf1i32
], !type !1
-; CHECK: [[VT2DATA:@[^ ]*]] = private constant { [0 x i8], [3 x ptr], [5 x i8] } { [0 x i8] zeroinitializer, [3 x ptr] [ptr @vf1i1, ptr @vf0i1, ptr @vf2i32], [5 x i8] c"\01\02\00\00\00" }, !type [[T0:![0-9]+]]
+; CHECK: [[VT2DATA:@[^ ]*]] = private constant { [0 x i8], [3 x ptr], [5 x i8] } { [0 x i8] zeroinitializer, [3 x ptr] [ptr @vf1i1, ptr @vf0i1, ptr @vf2i32], [5 x i8] c"\01\04\00\00\00" }, !type [[T0:![0-9]+]]
@vt2 = constant [3 x ptr] [
ptr @vf1i1,
ptr @vf0i1,
ptr @vf2i32
], !type !0
-; CHECK: [[VT3DATA:@[^ ]*]] = private constant { [0 x i8], [4 x ptr], [5 x i8] } { [0 x i8] zeroinitializer, [4 x ptr] [ptr null, ptr @vf0i1, ptr @vf1i1, ptr @vf3i32], [5 x i8] c"\02\03\00\00\00" }, !type [[T8]]
+; CHECK: [[VT3DATA:@[^ ]*]] = private constant { [0 x i8], [4 x ptr], [5 x i8] } { [0 x i8] zeroinitializer, [4 x ptr] [ptr null, ptr @vf0i1, ptr @vf1i1, ptr @vf3i32], [5 x i8] c"\02\05\00\00\00" }, !type [[T8]]
@vt3 = constant [4 x ptr] [
ptr null,
ptr @vf0i1,
@@ -26,13 +25,38 @@ ptr @vf1i1,
ptr @vf3i32
], !type !1
-; CHECK: [[VT4DATA:@[^ ]*]] = private constant { [0 x i8], [3 x ptr], [5 x i8] } { [0 x i8] zeroinitializer, [3 x ptr] [ptr @vf1i1, ptr @vf0i1, ptr @vf4i32], [5 x i8] c"\01\04\00\00\00" }, !type [[T0]]
+; CHECK: [[VT4DATA:@[^ ]*]] = private constant { [0 x i8], [3 x ptr], [5 x i8] } { [0 x i8] zeroinitializer, [3 x ptr] [ptr @vf1i1, ptr @vf0i1, ptr @vf4i32], [5 x i8] c"\01\06\00\00\00" }, !type [[T0]]
@vt4 = constant [3 x ptr] [
ptr @vf1i1,
ptr @vf0i1,
ptr @vf4i32
], !type !0
+;; Test relative vtables
+; CHECK: [[VT6RELDATA:@[^ ]*]] = private constant { [0 x i8], [4 x i32], [4 x i8] } { [0 x i8] zeroinitializer, [4 x i32] [
+; CHECK-SAME: i32 0,
+; CHECK-SAME: i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf1i1 to i64), i64 ptrtoint (ptr @vt5_rel to i64)) to i32),
+; CHECK-SAME: i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf0i1 to i64), i64 ptrtoint (ptr @vt5_rel to i64)) to i32),
+; CHECK-SAME: i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf2i32 to i64), i64 ptrtoint (ptr @vt5_rel to i64)) to i32)
+; CHECK-SAME: ], [4 x i8] c"\04\00\00\00" }, !type [[TREL:![0-9]+]]
+@vt5_rel = constant [4 x i32] [
+i32 zeroinitializer,
+i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf1i1 to i64), i64 ptrtoint (ptr @vt5_rel to i64)) to i32),
+i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf0i1 to i64), i64 ptrtoint (ptr @vt5_rel to i64)) to i32),
+i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf2i32 to i64), i64 ptrtoint (ptr @vt5_rel to i64)) to i32)
+], !type !3
+
+; CHECK: [[VT7RELDATA:@[^ ]*]] = private constant { [0 x i8], [3 x i32], [4 x i8] } { [0 x i8] zeroinitializer, [3 x i32] [
+; CHECK-SAME: i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf0i1 to i64), i64 ptrtoint (ptr @vt6_rel to i64)) to i32),
+; CHECK-SAME: i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf1i1 to i64), i64 ptrtoint (ptr @vt6_rel to i64)) to i32),
+; CHECK-SAME: i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf4i32 to i64), i64 ptrtoint (ptr @vt6_rel to i64)) to i32)
+; CHECK-SAME: ], [4 x i8] c"\06\00\00\00" }, !type [[TREL2:![0-9]+]]
+@vt6_rel = constant [3 x i32] [
+i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf0i1 to i64), i64 ptrtoint (ptr @vt6_rel to i64)) to i32),
+i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf1i1 to i64), i64 ptrtoint (ptr @vt6_rel to i64)) to i32),
+i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf4i32 to i64), i64 ptrtoint (ptr @vt6_rel to i64)) to i32)
+], !type !2
+
; CHECK: @vt1 = alias [4 x ptr], getelementptr inbounds ({ [0 x i8], [4 x ptr], [5 x i8] }, ptr [[VT1DATA]], i32 0, i32 1)
; CHECK: @vt2 = alias [3 x ptr], getelementptr inbounds ({ [0 x i8], [3 x ptr], [5 x i8] }, ptr [[VT2DATA]], i32 0, i32 1)
; CHECK: @vt3 = alias [4 x ptr], getelementptr inbounds ({ [0 x i8], [4 x ptr], [5 x i8] }, ptr [[VT3DATA]], i32 0, i32 1)
@@ -47,19 +71,19 @@ define i1 @vf1i1(ptr %this) readnone {
}
define i32 @vf1i32(ptr %this) readnone {
- ret i32 1
+ ret i32 3
}
define i32 @vf2i32(ptr %this) readnone {
- ret i32 2
+ ret i32 4
}
define i32 @vf3i32(ptr %this) readnone {
- ret i32 3
+ ret i32 5
}
define i32 @vf4i32(ptr %this) readnone {
- ret i32 4
+ ret i32 6
}
; CHECK: define i1 @call1(
@@ -107,11 +131,52 @@ define i32 @call3(ptr %obj) {
ret i32 %result
}
+; CHECK: define i1 @call1_rel(
+define i1 @call1_rel(ptr %obj) {
+ %vtable = load ptr, ptr %obj
+ %p = call i1 @llvm.type.test(ptr %vtable, metadata !"typeid2")
+ call void @llvm.assume(i1 %p)
+ %fptr = call ptr @llvm.load.relative.i32(ptr %vtable, i32 0)
+ %result = call i1 %fptr(ptr %obj)
+ ret i1 %result
+ ; CHECK: [[RES:%.*]] = icmp eq ptr %vtable, getelementptr (i8, ptr @vt5_rel, i64 4)
+ ; CHECK: ret i1 [[RES]]
+}
+
+; CHECK: define i1 @call2_rel(
+define i1 @call2_rel(ptr %obj) {
+ %vtable = load ptr, ptr %obj
+ %p = call i1 @llvm.type.test(ptr %vtable, metadata !"typeid2")
+ call void @llvm.assume(i1 %p)
+ %fptr = call ptr @llvm.load.relative.i32(ptr %vtable, i32 4)
+ %result = call i1 %fptr(ptr %obj)
+ ret i1 %result
+ ; CHECK: [[RES:%[^ ]*]] = icmp eq ptr %vtable, @vt6_rel
+ ; CHECK: ret i1 [[RES]]
+}
+
+; CHECK: define i32 @call3_rel(
+define i32 @call3_rel(ptr %obj) {
+ %vtable = load ptr, ptr %obj
+ %p = call i1 @llvm.type.test(ptr %vtable, metadata !"typeid2")
+ call void @llvm.assume(i1 %p)
+ %fptr = call ptr @llvm.load.relative.i32(ptr %vtable, i32 8)
+ ; CHECK: [[VTGEP3:%[^ ]*]] = getelementptr i8, ptr %vtable, i32 12
+ ; CHECK: [[VTLOAD3:%[^ ]*]] = load i32, ptr [[VTGEP3]]
+ %result = call i32 %fptr(ptr %obj)
+ ; CHECK: ret i32 [[VTLOAD3]]
+ ret i32 %result
+}
+
declare i1 @llvm.type.test(ptr, metadata)
declare void @llvm.assume(i1)
; CHECK: [[T8]] = !{i32 8, !"typeid"}
; CHECK: [[T0]] = !{i32 0, !"typeid"}
+; CHECK: [[TREL]] = !{i32 4, !"typeid2"}
+; CHECK: [[TREL2]] = !{i32 0, !"typeid2"}
!0 = !{i32 0, !"typeid"}
!1 = !{i32 8, !"typeid"}
+!2 = !{i32 0, !"typeid2"}
+!3 = !{i32 4, !"typeid2"}
diff --git a/llvm/test/Transforms/WholeProgramDevirt/virtual-const-prop-small-alignment-32.ll b/llvm/test/Transforms/WholeProgramDevirt/virtual-const-prop-small-alignment-32.ll
new file mode 100644
index 0000000000000..4947d17fc65d9
--- /dev/null
+++ b/llvm/test/Transforms/WholeProgramDevirt/virtual-const-prop-small-alignment-32.ll
@@ -0,0 +1,284 @@
+; RUN: opt -S -passes=wholeprogramdevirt -whole-program-visibility %s | FileCheck %s
+
+;; This target uses 32-bit sized and aligned pointers.
+target datalayout = "e-p:32:32"
+
+;; Constant propagation should be agnostic towards sections.
+;; Also the new global should be in the original vtable's section.
+; CHECK: [[VT1DATA:@[^ ]*]] = private constant { [8 x i8], [3 x ptr], [0 x i8] } { [8 x i8] c"\00\00\01\00\00\00\03\00", [3 x ptr] [ptr @vf0i1, ptr @vf1i8, ptr @vf1i32], [0 x i8] zeroinitializer }, section "vt1sec", !type [[T8:![0-9]+]]
+@vt1 = constant [3 x ptr] [
+ptr @vf0i1,
+ptr @vf1i8,
+ptr @vf1i32
+], section "vt1sec", !type !0
+
+;; This represents a normal vtable using the default ABI alignments.
+;; For this test, the pointers are 32-bit aligned.
+; CHECK: [[VT2DATA:@[^ ]*]] = private constant { [8 x i8], [3 x ptr], [0 x i8] } { [8 x i8] c"\00\00\02\00\00\00\02\01", [3 x ptr] [ptr @vf1i1, ptr @vf0i8, ptr @vf2i32], [0 x i8] zeroinitializer }, !type [[T8]]
+@vt2 = constant [3 x ptr] [
+ptr @vf1i1,
+ptr @vf0i8,
+ptr @vf2i32
+], !type !0
+
+;; This represents an underaligned vtable.
+;;
+;; All the functions returning i8s and i1s should still be constant-propagated
+;; because we can still do an aligned load regardless of where the 1-byte aligned
+;; vtable is.
+; CHECK: [[VT3DATA:@[^ ]*]] = private constant { [6 x i8], [3 x ptr], [0 x i8] } { [6 x i8] c"\03\00\00\00\03\00", [3 x ptr] [ptr @vf0i1, ptr @vf1i8, ptr @vf3i32], [0 x i8] zeroinitializer }, align 1, !type [[T5:![0-9]+]]
+@vt3 = constant [3 x ptr] [
+ptr @vf0i1,
+ptr @vf1i8,
+ptr @vf3i32
+], align 1, !type !0
+
+;; This represents an overaligned vtable.
+; CHECK: [[VT4DATA:@[^ ]*]] = private constant { [16 x i8], [3 x ptr], [0 x i8] } { [16 x i8] c"\00\00\00\00\00\00\00\00\00\00\04\00\00\00\02\01", [3 x ptr] [ptr @vf1i1, ptr @vf0i8, ptr @vf4i32], [0 x i8] zeroinitializer }, align 16, !type [[T16:![0-9]+]]
+@vt4 = constant [3 x ptr] [
+ptr @vf1i1,
+ptr @vf0i8,
+ptr @vf4i32
+], align 16, !type !0
+
+;; These contain a mix of different integral type sizes.
+; CHECK: [[VT6DATA:@[^ ]*]] = private constant { [12 x i8], [3 x ptr], [0 x i8] } { [12 x i8] c"\00\00\00\0B\05\00\00\00\00\00\00\00", [3 x ptr] [ptr @vf0i1, ptr @vf10i8, ptr @vf5i64], [0 x i8] zeroinitializer }, !type [[T1:![0-9]+]]
+@vt6 = constant [3 x ptr] [
+ptr @vf0i1,
+ptr @vf10i8,
+ptr @vf5i64
+], !type !1
+
+; CHECK: [[VT7DATA:@[^ ]*]] = private constant { [12 x i8], [3 x ptr], [0 x i8] } { [12 x i8] c"\00\00\00\0A\06\00\00\00\00\00\00\00", [3 x ptr] [ptr @vf1i1, ptr @vf9i8, ptr @vf6i64], [0 x i8] zeroinitializer }, !type [[T1]]
+@vt7 = constant [3 x ptr] [
+ptr @vf1i1,
+ptr @vf9i8,
+ptr @vf6i64
+], !type !1
+
+;; Test relative vtables
+; CHECK: [[VT6RELDATA:@[^ ]*]] = private constant { [12 x i8], [3 x i32], [0 x i8] } { [12 x i8] c"\00\00\00\0B\05\00\00\00\00\00\00\00", [3 x i32] [
+; CHECK-SAME: i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf0i1 to i64), i64 ptrtoint (ptr @vt6_rel to i64)) to i32),
+; CHECK-SAME: i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf10i8 to i64), i64 ptrtoint (ptr @vt6_rel to i64)) to i32),
+; CHECK-SAME: i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf5i64 to i64), i64 ptrtoint (ptr @vt6_rel to i64)) to i32)
+; CHECK-SAME: ], [0 x i8] zeroinitializer }, !type [[TREL:![0-9]+]]
+@vt6_rel = constant [3 x i32] [
+i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf0i1 to i64), i64 ptrtoint (ptr @vt6_rel to i64)) to i32),
+i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf10i8 to i64), i64 ptrtoint (ptr @vt6_rel to i64)) to i32),
+i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf5i64 to i64), i64 ptrtoint (ptr @vt6_rel to i64)) to i32)
+], !type !2
+
+; CHECK: [[VT7RELDATA:@[^ ]*]] = private constant { [12 x i8], [3 x i32], [0 x i8] } { [12 x i8] c"\00\00\00\0A\06\00\00\00\00\00\00\00", [3 x i32] [
+; CHECK-SAME: i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf1i1 to i64), i64 ptrtoint (ptr @vt7_rel to i64)) to i32),
+; CHECK-SAME: i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf9i8 to i64), i64 ptrtoint (ptr @vt7_rel to i64)) to i32),
+; CHECK-SAME: i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf6i64 to i64), i64 ptrtoint (ptr @vt7_rel to i64)) to i32)
+; CHECK-SAME: ], [0 x i8] zeroinitializer }, !type [[TREL]]
+@vt7_rel = constant [3 x i32] [
+i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf1i1 to i64), i64 ptrtoint (ptr @vt7_rel to i64)) to i32),
+i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf9i8 to i64), i64 ptrtoint (ptr @vt7_rel to i64)) to i32),
+i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf6i64 to i64), i64 ptrtoint (ptr @vt7_rel to i64)) to i32)
+], !type !2
+
+; CHECK: @vt1 = alias [3 x ptr], getelementptr inbounds ({ [8 x i8], [3 x ptr], [0 x i8] }, ptr [[VT1DATA]], i32 0, i32 1)
+; CHECK: @vt2 = alias [3 x ptr], getelementptr inbounds ({ [8 x i8], [3 x ptr], [0 x i8] }, ptr [[VT2DATA]], i32 0, i32 1)
+; CHECK: @vt3 = alias [3 x ptr], getelementptr inbounds ({ [6 x i8], [3 x ptr], [0 x i8] }, ptr [[VT3DATA]], i32 0, i32 1)
+; CHECK: @vt4 = alias [3 x ptr], getelementptr inbounds ({ [16 x i8], [3 x ptr], [0 x i8] }, ptr [[VT4DATA]], i32 0, i32 1)
+; CHECK: @vt6 = alias [3 x ptr], getelementptr inbounds ({ [12 x i8], [3 x ptr], [0 x i8] }, ptr [[VT6DATA]], i32 0, i32 1)
+; CHECK: @vt7 = alias [3 x ptr], getelementptr inbounds ({ [12 x i8], [3 x ptr], [0 x i8] }, ptr [[VT7DATA]], i32 0, i32 1)
+; CHECK: @vt6_rel = alias [3 x i32], getelementptr inbounds ({ [12 x i8], [3 x i32], [0 x i8] }, ptr [[VT6RELDATA]], i32 0, i32 1)
+; CHECK: @vt7_rel = alias [3 x i32], getelementptr inbounds ({ [12 x i8], [3 x i32], [0 x i8] }, ptr [[VT7RELDATA]], i32 0, i32 1)
+
+define i1 @vf0i1(ptr %this) readnone {
+ ret i1 0
+}
+
+define i1 @vf1i1(ptr %this) readnone {
+ ret i1 1
+}
+
+define i8 @vf0i8(ptr %this) readnone {
+ ret i8 2
+}
+
+define i8 @vf1i8(ptr %this) readnone {
+ ret i8 3
+}
+
+define i32 @vf1i32(ptr %this) readnone {
+ ret i32 1
+}
+
+define i32 @vf2i32(ptr %this) readnone {
+ ret i32 2
+}
+
+define i32 @vf3i32(ptr %this) readnone {
+ ret i32 3
+}
+
+define i32 @vf4i32(ptr %this) readnone {
+ ret i32 4
+}
+
+define i64 @vf5i64(ptr %this) readnone {
+ ret i64 5
+}
+
+define i64 @vf6i64(ptr %this) readnone {
+ ret i64 6
+}
+
+define i16 @vf7i16(ptr %this) readnone {
+ ret i16 7
+}
+
+define i16 @vf8i16(ptr %this) readnone {
+ ret i16 8
+}
+
+define i8 @vf9i8(ptr %this) readnone {
+ ret i8 10
+}
+
+define i8 @vf10i8(ptr %this) readnone {
+ ret i8 11
+}
+
+; CHECK: define i1 @call1(
+define i1 @call1(ptr %obj) {
+ %vtable = load ptr, ptr %obj
+ %p = call i1 @llvm.type.test(ptr %vtable, metadata !"typeid")
+ call void @llvm.assume(i1 %p)
+ %fptr = load ptr, ptr %vtable
+ ; CHECK: [[VTGEP1:%[^ ]*]] = getelementptr i8, ptr %vtable, i32 -1
+ ; CHECK: [[VTLOAD1:%[^ ]*]] = load i8, ptr [[VTGEP1]]
+ ; CHECK: [[VTAND1:%[^ ]*]] = and i8 [[VTLOAD1]], 1
+ ; CHECK: [[VTCMP1:%[^ ]*]] = icmp ne i8 [[VTAND1]], 0
+ %result = call i1 %fptr(ptr %obj)
+ ; CHECK: ret i1 [[VTCMP1]]
+ ret i1 %result
+}
+
+; CHECK: define i8 @call2(
+define i8 @call2(ptr %obj) {
+ %vtable = load ptr, ptr %obj
+ %p = call i1 @llvm.type.test(ptr %vtable, metadata !"typeid")
+ call void @llvm.assume(i1 %p)
+ %fptrptr = getelementptr [3 x ptr], ptr %vtable, i32 0, i32 1
+ %fptr = load ptr, ptr %fptrptr
+ %result = call i8 %fptr(ptr %obj)
+ ret i8 %result
+ ; CHECK: [[VTGEP2:%[^ ]*]] = getelementptr i8, ptr %vtable, i32 -2
+ ; CHECK: [[VTLOAD:%[^ ]*]] = load i8, ptr [[VTGEP2]]
+ ; CHECK: ret i8 [[VTLOAD]]
+}
+
+;; We never constant propagate this since the i32 cannot be reliably loaded
+;; without misalignment from all "typeid" vtables (due to the `align 1` vtable).
+; CHECK: define i32 @call3(
+define i32 @call3(ptr %obj) {
+ %vtable = load ptr, ptr %obj
+ %p = call i1 @llvm.type.test(ptr %vtable, metadata !"typeid")
+ call void @llvm.assume(i1 %p)
+ %fptrptr = getelementptr [3 x ptr], ptr %vtable, i32 0, i32 2
+ %fptr = load ptr, ptr %fptrptr
+ %result = call i32 %fptr(ptr %obj)
+ ret i32 %result
+ ; CHECK: [[VTGEP2:%[^ ]*]] = getelementptr i8, ptr %vtable, i32 -6
+ ; CHECK: [[VTLOAD:%[^ ]*]] = load i32, ptr [[VTGEP2]]
+ ; CHECK: ret i32 [[VTLOAD]]
+}
+
+; CHECK: define i1 @call4(
+define i1 @call4(ptr %obj) {
+ %vtable = load ptr, ptr %obj
+ %p = call i1 @llvm.type.test(ptr %vtable, metadata !"typeid2")
+ call void @llvm.assume(i1 %p)
+ %fptrptr = getelementptr [3 x ptr], ptr %vtable, i32 0, i32 0
+ %fptr = load ptr, ptr %fptrptr
+ %result = call i1 %fptr(ptr %obj)
+ ret i1 %result
+ ; CHECK: [[RES:%[^ ]*]] = icmp eq ptr %vtable, @vt7
+ ; CHECK: ret i1 [[RES]]
+}
+
+; CHECK: define i64 @call5(
+define i64 @call5(ptr %obj) {
+ %vtable = load ptr, ptr %obj
+ %p = call i1 @llvm.type.test(ptr %vtable, metadata !"typeid2")
+ call void @llvm.assume(i1 %p)
+ %fptrptr = getelementptr [3 x ptr], ptr %vtable, i32 0, i32 2
+ %fptr = load ptr, ptr %fptrptr
+ %result = call i64 %fptr(ptr %obj)
+ ret i64 %result
+ ; CHECK: [[VTGEP2:%[^ ]*]] = getelementptr i8, ptr %vtable, i32 -8
+ ; CHECK: [[VTLOAD:%[^ ]*]] = load i64, ptr [[VTGEP2]]
+ ; CHECK: ret i64 [[VTLOAD]]
+}
+
+; CHECK: define i8 @call6(
+define i8 @call6(ptr %obj) {
+ %vtable = load ptr, ptr %obj
+ %p = call i1 @llvm.type.test(ptr %vtable, metadata !"typeid2")
+ call void @llvm.assume(i1 %p)
+ %fptrptr = getelementptr [3 x ptr], ptr %vtable, i32 0, i32 1
+ %fptr = load ptr, ptr %fptrptr
+ %result = call i8 %fptr(ptr %obj)
+ ret i8 %result
+ ; CHECK: [[VTGEP2:%[^ ]*]] = getelementptr i8, ptr %vtable, i32 -9
+ ; CHECK: [[VTLOAD:%[^ ]*]] = load i8, ptr [[VTGEP2]]
+ ; CHECK: ret i8 [[VTLOAD]]
+}
+
+; CHECK: define i1 @call4_rel(
+define i1 @call4_rel(ptr %obj) {
+ %vtable = load ptr, ptr %obj
+ %p = call i1 @llvm.type.test(ptr %vtable, metadata !"typeid3")
+ call void @llvm.assume(i1 %p)
+ %fptr = call ptr @llvm.load.relative.i32(ptr %vtable, i32 0)
+ %result = call i1 %fptr(ptr %obj)
+ ret i1 %result
+ ; CHECK: [[RES:%[^ ]*]] = icmp eq ptr %vtable, @vt7_rel
+ ; CHECK: ret i1 [[RES]]
+}
+
+; CHECK: define i64 @call5_rel(
+define i64 @call5_rel(ptr %obj) {
+ %vtable = load ptr, ptr %obj
+ %p = call i1 @llvm.type.test(ptr %vtable, metadata !"typeid3")
+ call void @llvm.assume(i1 %p)
+ %fptr = call ptr @llvm.load.relative.i32(ptr %vtable, i32 8)
+ %result = call i64 %fptr(ptr %obj)
+ ret i64 %result
+ ; CHECK: [[VTGEP2:%[^ ]*]] = getelementptr i8, ptr %vtable, i32 -8
+ ; CHECK: [[VTLOAD:%[^ ]*]] = load i64, ptr [[VTGEP2]]
+ ; CHECK: ret i64 [[VTLOAD]]
+}
+
+; CHECK: define i8 @call6_rel(
+define i8 @call6_rel(ptr %obj) {
+ %vtable = load ptr, ptr %obj
+ %p = call i1 @llvm.type.test(ptr %vtable, metadata !"typeid3")
+ call void @llvm.assume(i1 %p)
+ %fptr = call ptr @llvm.load.relative.i32(ptr %vtable, i32 4)
+ %result = call i8 %fptr(ptr %obj)
+ ret i8 %result
+ ; CHECK: [[VTGEP2:%[^ ]*]] = getelementptr i8, ptr %vtable, i32 -9
+ ; CHECK: [[VTLOAD:%[^ ]*]] = load i8, ptr [[VTGEP2]]
+ ; CHECK: ret i8 [[VTLOAD]]
+}
+
+declare i1 @llvm.type.test(ptr, metadata)
+declare void @llvm.assume(i1)
+declare void @__cxa_pure_virtual()
+declare ptr @llvm.load.relative.i32(ptr, i32)
+
+; CHECK: [[T8]] = !{i32 8, !"typeid"}
+; CHECK: [[T5]] = !{i32 6, !"typeid"}
+; CHECK: [[T16]] = !{i32 16, !"typeid"}
+; CHECK: [[T1]] = !{i32 12, !"typeid2"}
+; CHECK: [[TREL]] = !{i32 12, !"typeid3"}
+
+!0 = !{i32 0, !"typeid"}
+!1 = !{i32 0, !"typeid2"}
+!2 = !{i32 0, !"typeid3"}
diff --git a/llvm/test/Transforms/WholeProgramDevirt/virtual-const-prop-small-alignment-64.ll b/llvm/test/Transforms/WholeProgramDevirt/virtual-const-prop-small-alignment-64.ll
new file mode 100644
index 0000000000000..1534d1d44c10c
--- /dev/null
+++ b/llvm/test/Transforms/WholeProgramDevirt/virtual-const-prop-small-alignment-64.ll
@@ -0,0 +1,285 @@
+; RUN: opt -S -passes=wholeprogramdevirt -whole-program-visibility %s | FileCheck %s
+
+;; This target uses 64-bit sized and aligned pointers.
+target datalayout = "e-p:64:64"
+
+;; Constant propagation should be agnostic towards sections.
+;; Also the new global should be in the original vtable's section.
+; CHECK: [[VT1DATA:@[^ ]*]] = private constant { [8 x i8], [3 x ptr], [0 x i8] } { [8 x i8] c"\00\00\01\00\00\00\03\00", [3 x ptr] [ptr @vf0i1, ptr @vf1i8, ptr @vf1i32], [0 x i8] zeroinitializer }, section "vt1sec", !type [[T8:![0-9]+]]
+@vt1 = constant [3 x ptr] [
+ptr @vf0i1,
+ptr @vf1i8,
+ptr @vf1i32
+], section "vt1sec", !type !0
+
+;; This represents a normal vtable using the default ABI alignments.
+;; For this test, the pointers are 64-bit aligned.
+; CHECK: [[VT2DATA:@[^ ]*]] = private constant { [8 x i8], [3 x ptr], [0 x i8] } { [8 x i8] c"\00\00\02\00\00\00\02\01", [3 x ptr] [ptr @vf1i1, ptr @vf0i8, ptr @vf2i32], [0 x i8] zeroinitializer }, !type [[T8]]
+@vt2 = constant [3 x ptr] [
+ptr @vf1i1,
+ptr @vf0i8,
+ptr @vf2i32
+], !type !0
+
+;; This represents an underaligned vtable.
+;;
+;; All the functions returning i8s and i1s should still be constant-propagated
+;; because we can still do an aligned load regardless of where the 1-byte aligned
+;; vtable is.
+; CHECK: [[VT3DATA:@[^ ]*]] = private constant { [6 x i8], [3 x ptr], [0 x i8] } { [6 x i8] c"\03\00\00\00\03\00", [3 x ptr] [ptr @vf0i1, ptr @vf1i8, ptr @vf3i32], [0 x i8] zeroinitializer }, align 1, !type [[T5:![0-9]+]]
+@vt3 = constant [3 x ptr] [
+ptr @vf0i1,
+ptr @vf1i8,
+ptr @vf3i32
+], align 1, !type !0
+
+;; This represents an overaligned vtable.
+; CHECK: [[VT4DATA:@[^ ]*]] = private constant { [16 x i8], [3 x ptr], [0 x i8] } { [16 x i8] c"\00\00\00\00\00\00\00\00\00\00\04\00\00\00\02\01", [3 x ptr] [ptr @vf1i1, ptr @vf0i8, ptr @vf4i32], [0 x i8] zeroinitializer }, align 16, !type [[T16:![0-9]+]]
+@vt4 = constant [3 x ptr] [
+ptr @vf1i1,
+ptr @vf0i8,
+ptr @vf4i32
+], align 16, !type !0
+
+;; These contain a mix of different integral type sizes. All of these should be
+;; able to be constant propagated since we can always ensure they can be loaded
+;; from their respective alignment due to the vtable being aligned to a large
+;; enough value.
+; CHECK: [[VT6DATA:@[^ ]*]] = private constant { [16 x i8], [3 x ptr], [0 x i8] } { [16 x i8] c"\00\00\00\00\00\00\00\0B\05\00\00\00\00\00\00\00", [3 x ptr] [ptr @vf0i1, ptr @vf10i8, ptr @vf5i64], [0 x i8] zeroinitializer }, !type [[T1:![0-9]+]]
+@vt6 = constant [3 x ptr] [
+ptr @vf0i1,
+ptr @vf10i8,
+ptr @vf5i64
+], !type !1
+
+; CHECK: [[VT7DATA:@[^ ]*]] = private constant { [16 x i8], [3 x ptr], [0 x i8] } { [16 x i8] c"\00\00\00\00\00\00\00\0A\06\00\00\00\00\00\00\00", [3 x ptr] [ptr @vf1i1, ptr @vf9i8, ptr @vf6i64], [0 x i8] zeroinitializer }, !type [[T1]]
+@vt7 = constant [3 x ptr] [
+ptr @vf1i1,
+ptr @vf9i8,
+ptr @vf6i64
+], !type !1
+
+;; Test relative vtables
+; CHECK: [[VT6RELDATA:@[^ ]*]] = private constant { [12 x i8], [3 x i32], [0 x i8] } { [12 x i8] c"\00\00\00\0B\05\00\00\00\00\00\00\00", [3 x i32] [
+; CHECK-SAME: i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf0i1 to i64), i64 ptrtoint (ptr @vt6_rel to i64)) to i32),
+; CHECK-SAME: i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf10i8 to i64), i64 ptrtoint (ptr @vt6_rel to i64)) to i32),
+; CHECK-SAME: i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf5i64 to i64), i64 ptrtoint (ptr @vt6_rel to i64)) to i32)
+; CHECK-SAME: ], [0 x i8] zeroinitializer }, !type [[TREL:![0-9]+]]
+@vt6_rel = constant [3 x i32] [
+i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf0i1 to i64), i64 ptrtoint (ptr @vt6_rel to i64)) to i32),
+i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf10i8 to i64), i64 ptrtoint (ptr @vt6_rel to i64)) to i32),
+i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf5i64 to i64), i64 ptrtoint (ptr @vt6_rel to i64)) to i32)
+], !type !2
+
+; CHECK: [[VT7RELDATA:@[^ ]*]] = private constant { [12 x i8], [3 x i32], [0 x i8] } { [12 x i8] c"\00\00\00\0A\06\00\00\00\00\00\00\00", [3 x i32] [
+; CHECK-SAME: i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf1i1 to i64), i64 ptrtoint (ptr @vt7_rel to i64)) to i32),
+; CHECK-SAME: i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf9i8 to i64), i64 ptrtoint (ptr @vt7_rel to i64)) to i32),
+; CHECK-SAME: i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf6i64 to i64), i64 ptrtoint (ptr @vt7_rel to i64)) to i32)
+; CHECK-SAME: ], [0 x i8] zeroinitializer }, !type [[TREL]]
+@vt7_rel = constant [3 x i32] [
+i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf1i1 to i64), i64 ptrtoint (ptr @vt7_rel to i64)) to i32),
+i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf9i8 to i64), i64 ptrtoint (ptr @vt7_rel to i64)) to i32),
+i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf6i64 to i64), i64 ptrtoint (ptr @vt7_rel to i64)) to i32)
+], !type !2
+
+; CHECK: @vt1 = alias [3 x ptr], getelementptr inbounds ({ [8 x i8], [3 x ptr], [0 x i8] }, ptr [[VT1DATA]], i32 0, i32 1)
+; CHECK: @vt2 = alias [3 x ptr], getelementptr inbounds ({ [8 x i8], [3 x ptr], [0 x i8] }, ptr [[VT2DATA]], i32 0, i32 1)
+; CHECK: @vt3 = alias [3 x ptr], getelementptr inbounds ({ [6 x i8], [3 x ptr], [0 x i8] }, ptr [[VT3DATA]], i32 0, i32 1)
+; CHECK: @vt4 = alias [3 x ptr], getelementptr inbounds ({ [16 x i8], [3 x ptr], [0 x i8] }, ptr [[VT4DATA]], i32 0, i32 1)
+; CHECK: @vt6 = alias [3 x ptr], getelementptr inbounds ({ [16 x i8], [3 x ptr], [0 x i8] }, ptr [[VT6DATA]], i32 0, i32 1)
+; CHECK: @vt7 = alias [3 x ptr], getelementptr inbounds ({ [16 x i8], [3 x ptr], [0 x i8] }, ptr [[VT7DATA]], i32 0, i32 1)
+; CHECK: @vt6_rel = alias [3 x i32], getelementptr inbounds ({ [12 x i8], [3 x i32], [0 x i8] }, ptr [[VT6RELDATA]], i32 0, i32 1)
+; CHECK: @vt7_rel = alias [3 x i32], getelementptr inbounds ({ [12 x i8], [3 x i32], [0 x i8] }, ptr [[VT7RELDATA]], i32 0, i32 1)
+
+define i1 @vf0i1(ptr %this) readnone {
+ ret i1 0
+}
+
+define i1 @vf1i1(ptr %this) readnone {
+ ret i1 1
+}
+
+define i8 @vf0i8(ptr %this) readnone {
+ ret i8 2
+}
+
+define i8 @vf1i8(ptr %this) readnone {
+ ret i8 3
+}
+
+define i32 @vf1i32(ptr %this) readnone {
+ ret i32 1
+}
+
+define i32 @vf2i32(ptr %this) readnone {
+ ret i32 2
+}
+
+define i32 @vf3i32(ptr %this) readnone {
+ ret i32 3
+}
+
+define i32 @vf4i32(ptr %this) readnone {
+ ret i32 4
+}
+
+define i64 @vf5i64(ptr %this) readnone {
+ ret i64 5
+}
+
+define i64 @vf6i64(ptr %this) readnone {
+ ret i64 6
+}
+
+define i16 @vf7i16(ptr %this) readnone {
+ ret i16 7
+}
+
+define i16 @vf8i16(ptr %this) readnone {
+ ret i16 8
+}
+
+define i8 @vf9i8(ptr %this) readnone {
+ ret i8 10
+}
+
+define i8 @vf10i8(ptr %this) readnone {
+ ret i8 11
+}
+
+; CHECK: define i1 @call1(
+define i1 @call1(ptr %obj) {
+ %vtable = load ptr, ptr %obj
+ %p = call i1 @llvm.type.test(ptr %vtable, metadata !"typeid")
+ call void @llvm.assume(i1 %p)
+ %fptr = load ptr, ptr %vtable
+ ; CHECK: [[VTGEP1:%[^ ]*]] = getelementptr i8, ptr %vtable, i32 -1
+ ; CHECK: [[VTLOAD1:%[^ ]*]] = load i8, ptr [[VTGEP1]]
+ ; CHECK: [[VTAND1:%[^ ]*]] = and i8 [[VTLOAD1]], 1
+ ; CHECK: [[VTCMP1:%[^ ]*]] = icmp ne i8 [[VTAND1]], 0
+ %result = call i1 %fptr(ptr %obj)
+ ; CHECK: ret i1 [[VTCMP1]]
+ ret i1 %result
+}
+
+; CHECK: define i8 @call2(
+define i8 @call2(ptr %obj) {
+ %vtable = load ptr, ptr %obj
+ %p = call i1 @llvm.type.test(ptr %vtable, metadata !"typeid")
+ call void @llvm.assume(i1 %p)
+ %fptrptr = getelementptr [3 x ptr], ptr %vtable, i32 0, i32 1
+ %fptr = load ptr, ptr %fptrptr
+ %result = call i8 %fptr(ptr %obj)
+ ret i8 %result
+ ; CHECK: [[VTGEP2:%[^ ]*]] = getelementptr i8, ptr %vtable, i32 -2
+ ; CHECK: [[VTLOAD:%[^ ]*]] = load i8, ptr [[VTGEP2]]
+ ; CHECK: ret i8 [[VTLOAD]]
+}
+
+; CHECK: define i32 @call3(
+define i32 @call3(ptr %obj) {
+ %vtable = load ptr, ptr %obj
+ %p = call i1 @llvm.type.test(ptr %vtable, metadata !"typeid")
+ call void @llvm.assume(i1 %p)
+ %fptrptr = getelementptr [3 x ptr], ptr %vtable, i32 0, i32 2
+ %fptr = load ptr, ptr %fptrptr
+ %result = call i32 %fptr(ptr %obj)
+ ret i32 %result
+ ; CHECK: [[VTGEP2:%[^ ]*]] = getelementptr i8, ptr %vtable, i32 -6
+ ; CHECK: [[VTLOAD:%[^ ]*]] = load i32, ptr [[VTGEP2]]
+ ; CHECK: ret i32 [[VTLOAD]]
+}
+
+; CHECK: define i1 @call4(
+define i1 @call4(ptr %obj) {
+ %vtable = load ptr, ptr %obj
+ %p = call i1 @llvm.type.test(ptr %vtable, metadata !"typeid2")
+ call void @llvm.assume(i1 %p)
+ %fptrptr = getelementptr [3 x ptr], ptr %vtable, i32 0, i32 0
+ %fptr = load ptr, ptr %fptrptr
+ %result = call i1 %fptr(ptr %obj)
+ ret i1 %result
+ ; CHECK: [[RES:%[^ ]*]] = icmp eq ptr %vtable, @vt7
+ ; CHECK: ret i1 [[RES]]
+}
+
+; CHECK: define i64 @call5(
+define i64 @call5(ptr %obj) {
+ %vtable = load ptr, ptr %obj
+ %p = call i1 @llvm.type.test(ptr %vtable, metadata !"typeid2")
+ call void @llvm.assume(i1 %p)
+ %fptrptr = getelementptr [3 x ptr], ptr %vtable, i32 0, i32 2
+ %fptr = load ptr, ptr %fptrptr
+ %result = call i64 %fptr(ptr %obj)
+ ret i64 %result
+ ; CHECK: [[VTGEP2:%[^ ]*]] = getelementptr i8, ptr %vtable, i32 -8
+ ; CHECK: [[VTLOAD:%[^ ]*]] = load i64, ptr [[VTGEP2]]
+ ; CHECK: ret i64 [[VTLOAD]]
+}
+
+; CHECK: define i8 @call6(
+define i8 @call6(ptr %obj) {
+ %vtable = load ptr, ptr %obj
+ %p = call i1 @llvm.type.test(ptr %vtable, metadata !"typeid2")
+ call void @llvm.assume(i1 %p)
+ %fptrptr = getelementptr [3 x ptr], ptr %vtable, i32 0, i32 1
+ %fptr = load ptr, ptr %fptrptr
+ %result = call i8 %fptr(ptr %obj)
+ ret i8 %result
+ ; CHECK: [[VTGEP2:%[^ ]*]] = getelementptr i8, ptr %vtable, i32 -9
+ ; CHECK: [[VTLOAD:%[^ ]*]] = load i8, ptr [[VTGEP2]]
+ ; CHECK: ret i8 [[VTLOAD]]
+}
+
+; CHECK: define i1 @call4_rel(
+define i1 @call4_rel(ptr %obj) {
+ %vtable = load ptr, ptr %obj
+ %p = call i1 @llvm.type.test(ptr %vtable, metadata !"typeid3")
+ call void @llvm.assume(i1 %p)
+ %fptr = call ptr @llvm.load.relative.i32(ptr %vtable, i32 0)
+ %result = call i1 %fptr(ptr %obj)
+ ret i1 %result
+ ; CHECK: [[RES:%[^ ]*]] = icmp eq ptr %vtable, @vt7_rel
+ ; CHECK: ret i1 [[RES]]
+}
+
+; CHECK: define i64 @call5_rel(
+define i64 @call5_rel(ptr %obj) {
+ %vtable = load ptr, ptr %obj
+ %p = call i1 @llvm.type.test(ptr %vtable, metadata !"typeid3")
+ call void @llvm.assume(i1 %p)
+ %fptr = call ptr @llvm.load.relative.i32(ptr %vtable, i32 8)
+ %result = call i64 %fptr(ptr %obj)
+ ret i64 %result
+ ; CHECK: [[VTGEP2:%[^ ]*]] = getelementptr i8, ptr %vtable, i32 -8
+ ; CHECK: [[VTLOAD:%[^ ]*]] = load i64, ptr [[VTGEP2]]
+ ; CHECK: ret i64 [[VTLOAD]]
+}
+
+; CHECK: define i8 @call6_rel(
+define i8 @call6_rel(ptr %obj) {
+ %vtable = load ptr, ptr %obj
+ %p = call i1 @llvm.type.test(ptr %vtable, metadata !"typeid3")
+ call void @llvm.assume(i1 %p)
+ %fptr = call ptr @llvm.load.relative.i32(ptr %vtable, i32 4)
+ %result = call i8 %fptr(ptr %obj)
+ ret i8 %result
+ ; CHECK: [[VTGEP2:%[^ ]*]] = getelementptr i8, ptr %vtable, i32 -9
+ ; CHECK: [[VTLOAD:%[^ ]*]] = load i8, ptr [[VTGEP2]]
+ ; CHECK: ret i8 [[VTLOAD]]
+}
+
+declare i1 @llvm.type.test(ptr, metadata)
+declare void @llvm.assume(i1)
+declare void @__cxa_pure_virtual()
+declare ptr @llvm.load.relative.i32(ptr, i32)
+
+; CHECK: [[T8]] = !{i32 8, !"typeid"}
+; CHECK: [[T5]] = !{i32 6, !"typeid"}
+; CHECK: [[T16]] = !{i32 16, !"typeid"}
+; CHECK: [[T1]] = !{i32 16, !"typeid2"}
+; CHECK: [[TREL]] = !{i32 12, !"typeid3"}
+
+!0 = !{i32 0, !"typeid"}
+!1 = !{i32 0, !"typeid2"}
+!2 = !{i32 0, !"typeid3"}
More information about the llvm-commits
mailing list