[llvm] fa25025 - Migrate llvm.experimental.patchpoint() to ptr.
Edd Barrett via llvm-commits
llvm-commits at lists.llvm.org
Wed Aug 10 05:18:17 PDT 2022
Author: Edd Barrett
Date: 2022-08-10T13:18:02+01:00
New Revision: fa250250b2966613fba7b88575029eb7cf0da66c
URL: https://github.com/llvm/llvm-project/commit/fa250250b2966613fba7b88575029eb7cf0da66c
DIFF: https://github.com/llvm/llvm-project/commit/fa250250b2966613fba7b88575029eb7cf0da66c.diff
LOG: Migrate llvm.experimental.patchpoint() to ptr.
This intrinsic used a typed pointer (i8*) for its call target operand. This
change makes that operand an opaque pointer (ptr) and updates the remaining
typed pointers in the test files that use the intrinsic.
Differential Revision: https://reviews.llvm.org/D131261
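For reference, the shape of the change, as a minimal sketch distilled from the
declarations and calls in the diff below (the patchpoint ID, shadow size and
inttoptr constant are illustrative, not taken from any one test):

  ; Before: the call target is a typed i8* pointer.
  declare void @llvm.experimental.patchpoint.void(i64, i32, i8*, i32, ...)
  %target = inttoptr i64 244837814094590 to i8*
  tail call void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 1, i32 20, i8* %target, i32 0)

  ; After: the call target is an opaque ptr.
  declare void @llvm.experimental.patchpoint.void(i64, i32, ptr, i32, ...)
  %target = inttoptr i64 244837814094590 to ptr
  tail call void (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.void(i64 1, i32 20, ptr %target, i32 0)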
Added:
Modified:
llvm/docs/StackMaps.rst
llvm/test/Analysis/CallGraph/non-leaf-intrinsics.ll
llvm/test/Analysis/LazyCallGraph/non-leaf-intrinsics.ll
llvm/test/CodeGen/AArch64/arm64-anyregcc-crash.ll
llvm/test/CodeGen/AArch64/arm64-anyregcc.ll
llvm/test/CodeGen/AArch64/arm64-patchpoint-scratch-regs.ll
llvm/test/CodeGen/AArch64/arm64-patchpoint-webkit_jscc.ll
llvm/test/CodeGen/AArch64/arm64-patchpoint.ll
llvm/test/CodeGen/AArch64/arm64-stackmap.ll
llvm/test/CodeGen/AArch64/stackmap-liveness.ll
llvm/test/CodeGen/AArch64/stackmap.ll
llvm/test/CodeGen/MIR/X86/liveout-register-mask.mir
llvm/test/CodeGen/PowerPC/ppc64-anyregcc-crash.ll
llvm/test/CodeGen/PowerPC/ppc64-anyregcc.ll
llvm/test/CodeGen/PowerPC/ppc64-patchpoint.ll
llvm/test/CodeGen/PowerPC/ppc64-stackmap.ll
llvm/test/CodeGen/SystemZ/anyregcc-novec.ll
llvm/test/CodeGen/SystemZ/anyregcc-vec.ll
llvm/test/CodeGen/SystemZ/anyregcc.ll
llvm/test/CodeGen/SystemZ/patchpoint-invoke.ll
llvm/test/CodeGen/SystemZ/patchpoint.ll
llvm/test/CodeGen/SystemZ/stackmap.ll
llvm/test/CodeGen/X86/patchpoint-verifiable.mir
llvm/test/CodeGen/X86/selectiondag-patchpoint-legalize.ll
Removed:
################################################################################
diff --git a/llvm/docs/StackMaps.rst b/llvm/docs/StackMaps.rst
index 783336e2d8065..826393bd29186 100644
--- a/llvm/docs/StackMaps.rst
+++ b/llvm/docs/StackMaps.rst
@@ -148,9 +148,9 @@ For example, a stack map with 8 byte shadow:
.. code-block:: llvm
call void @runtime()
- call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 77, i32 8,
- i64* %ptr)
- %val = load i64* %ptr
+ call void (i64, i32, ...) @llvm.experimental.stackmap(i64 77, i32 8,
+ ptr %ptr)
+ %val = load i64, ptr %ptr
%add = add i64 %val, 3
ret i64 %add
@@ -188,10 +188,10 @@ Syntax:
declare void
@llvm.experimental.patchpoint.void(i64 <id>, i32 <numBytes>,
- i8* <target>, i32 <numArgs>, ...)
+ ptr <target>, i32 <numArgs>, ...)
declare i64
@llvm.experimental.patchpoint.i64(i64 <id>, i32 <numBytes>,
- i8* <target>, i32 <numArgs>, ...)
+ ptr <target>, i32 <numArgs>, ...)
Overview:
"""""""""
@@ -260,10 +260,10 @@ in $rdi, and a return value in $rax per native calling convention:
.. code-block:: llvm
- %target = inttoptr i64 -281474976710654 to i8*
- %val = call i64 (i64, i32, ...)*
+ %target = inttoptr i64 -281474976710654 to ptr
+ %val = call i64 (i64, i32, ...)
@llvm.experimental.patchpoint.i64(i64 78, i32 15,
- i8* %target, i32 1, i64* %ptr)
+ ptr %target, i32 1, ptr %ptr)
%add = add i64 %val, 3
ret i64 %add
@@ -285,8 +285,8 @@ registers, then the ``anyregcc`` convention may be used:
.. code-block:: none
%val = call anyregcc @llvm.experimental.patchpoint(i64 78, i32 15,
- i8* %target, i32 1,
- i64* %ptr)
+ ptr %target, i32 1,
+ ptr %ptr)
The stack map now indicates the location of the %ptr argument and
return value:
@@ -492,7 +492,7 @@ For example:
entry:
%a = alloca i64...
- llvm.experimental.stackmap(i64 <ID>, i32 <shadowBytes>, i64* %a)
+ llvm.experimental.stackmap(i64 <ID>, i32 <shadowBytes>, ptr %a)
The runtime can determine this alloca's relative location on the
stack immediately after compilation, or at any time thereafter. This
diff --git a/llvm/test/Analysis/CallGraph/non-leaf-intrinsics.ll b/llvm/test/Analysis/CallGraph/non-leaf-intrinsics.ll
index 4f6fb7f539a70..9aec2a4331eb5 100644
--- a/llvm/test/Analysis/CallGraph/non-leaf-intrinsics.ll
+++ b/llvm/test/Analysis/CallGraph/non-leaf-intrinsics.ll
@@ -1,23 +1,22 @@
; RUN: opt -S -print-callgraph -disable-output < %s 2>&1 | FileCheck %s
-declare void @llvm.experimental.patchpoint.void(i64, i32, i8*, i32, ...)
-declare token @llvm.experimental.gc.statepoint.p0f_isVoidf(i64, i32, void ()*, i32, i32, ...)
+declare void @llvm.experimental.patchpoint.void(i64, i32, ptr, i32, ...)
+declare token @llvm.experimental.gc.statepoint.p0f_isVoidf(i64, i32, ptr, i32, i32, ...)
define private void @f() {
ret void
}
-define void @calls_statepoint(i8 addrspace(1)* %arg) gc "statepoint-example" {
+define void @calls_statepoint(ptr addrspace(1) %arg) gc "statepoint-example" {
entry:
- %cast = bitcast i8 addrspace(1)* %arg to i64 addrspace(1)*
- %safepoint_token = call token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* elementtype(void ()) @f, i32 0, i32 0, i32 0, i32 0) ["gc-live"(i8 addrspace(1)* %arg, i64 addrspace(1)* %cast, i8 addrspace(1)* %arg, i8 addrspace(1)* %arg), "deopt" (i32 0, i32 0, i32 0, i32 10, i32 0)]
+ %safepoint_token = call token (i64, i32, ptr, i32, i32, ...)
+ @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, ptr elementtype(void ()) @f, i32 0, i32 0, i32 0, i32 0) ["gc-live"(ptr addrspace(1) %arg, ptr addrspace(1) %arg, ptr addrspace(1) %arg, ptr addrspace(1) %arg), "deopt" (i32 0, i32 0, i32 0, i32 10, i32 0)]
ret void
}
define void @calls_patchpoint() {
entry:
- %c = bitcast void()* @f to i8*
- tail call void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 1, i32 15, i8* %c, i32 0, i16 65535, i16 -1, i32 65536, i32 2000000000, i32 2147483647, i32 -1, i32 4294967295, i32 4294967296, i64 2147483648, i64 4294967295, i64 4294967296, i64 -1)
+ tail call void (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.void(i64 1, i32 15, ptr @f, i32 0, i16 65535, i16 -1, i32 65536, i32 2000000000, i32 2147483647, i32 -1, i32 4294967295, i32 4294967296, i64 2147483648, i64 4294967295, i64 4294967296, i64 -1)
ret void
}
diff --git a/llvm/test/Analysis/LazyCallGraph/non-leaf-intrinsics.ll b/llvm/test/Analysis/LazyCallGraph/non-leaf-intrinsics.ll
index 2d0731913b14c..d754517a51f6c 100644
--- a/llvm/test/Analysis/LazyCallGraph/non-leaf-intrinsics.ll
+++ b/llvm/test/Analysis/LazyCallGraph/non-leaf-intrinsics.ll
@@ -1,18 +1,17 @@
; RUN: opt -S -disable-output -passes=print-lcg < %s 2>&1 | FileCheck %s
-declare void @llvm.experimental.patchpoint.void(i64, i32, i8*, i32, ...)
-declare token @llvm.experimental.gc.statepoint.p0f_isVoidf(i64, i32, void ()*, i32, i32, ...)
+declare void @llvm.experimental.patchpoint.void(i64, i32, ptr, i32, ...)
+declare token @llvm.experimental.gc.statepoint.p0f_isVoidf(i64, i32, ptr, i32, i32, ...)
define private void @f() {
ret void
}
-define void @calls_statepoint(i8 addrspace(1)* %arg) gc "statepoint-example" {
+define void @calls_statepoint(ptr addrspace(1) %arg) gc "statepoint-example" {
; CHECK: Edges in function: calls_statepoint
; CHECK-NEXT: -> f
entry:
- %cast = bitcast i8 addrspace(1)* %arg to i64 addrspace(1)*
- %safepoint_token = call token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* elementtype(void ()) @f, i32 0, i32 0, i32 0, i32 0) ["gc-live" (i8 addrspace(1)* %arg, i64 addrspace(1)* %cast, i8 addrspace(1)* %arg, i8 addrspace(1)* %arg), "deopt" (i32 0, i32 0, i32 0, i32 10, i32 0)]
+ %safepoint_token = call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, ptr elementtype(void ()) @f, i32 0, i32 0, i32 0, i32 0) ["gc-live" (ptr addrspace(1) %arg, ptr addrspace(1) %arg, ptr addrspace(1) %arg, ptr addrspace(1) %arg), "deopt" (i32 0, i32 0, i32 0, i32 10, i32 0)]
ret void
}
@@ -20,7 +19,6 @@ define void @calls_patchpoint() {
; CHECK: Edges in function: calls_patchpoint
; CHECK-NEXT: -> f
entry:
- %c = bitcast void()* @f to i8*
- tail call void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 1, i32 15, i8* %c, i32 0, i16 65535, i16 -1, i32 65536, i32 2000000000, i32 2147483647, i32 -1, i32 4294967295, i32 4294967296, i64 2147483648, i64 4294967295, i64 4294967296, i64 -1)
+ tail call void (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.void(i64 1, i32 15, ptr @f, i32 0, i16 65535, i16 -1, i32 65536, i32 2000000000, i32 2147483647, i32 -1, i32 4294967295, i32 4294967296, i64 2147483648, i64 4294967295, i64 4294967296, i64 -1)
ret void
}
diff --git a/llvm/test/CodeGen/AArch64/arm64-anyregcc-crash.ll b/llvm/test/CodeGen/AArch64/arm64-anyregcc-crash.ll
index f8852262a65c4..b4fb9b613233d 100644
--- a/llvm/test/CodeGen/AArch64/arm64-anyregcc-crash.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-anyregcc-crash.ll
@@ -8,7 +8,7 @@ define i64 @anyreglimit(i64 %v1, i64 %v2, i64 %v3, i64 %v4, i64 %v5, i64 %v6, i6
i64 %v17, i64 %v18, i64 %v19, i64 %v20, i64 %v21, i64 %v22, i64 %v23, i64 %v24,
i64 %v25, i64 %v26, i64 %v27, i64 %v28, i64 %v29, i64 %v30, i64 %v31, i64 %v32) {
entry:
- %result = tail call anyregcc i64 (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.i64(i64 12, i32 16, i8* inttoptr (i64 0 to i8*), i32 32,
+ %result = tail call anyregcc i64 (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.i64(i64 12, i32 16, ptr inttoptr (i64 0 to ptr), i32 32,
i64 %v1, i64 %v2, i64 %v3, i64 %v4, i64 %v5, i64 %v6, i64 %v7, i64 %v8,
i64 %v9, i64 %v10, i64 %v11, i64 %v12, i64 %v13, i64 %v14, i64 %v15, i64 %v16,
i64 %v17, i64 %v18, i64 %v19, i64 %v20, i64 %v21, i64 %v22, i64 %v23, i64 %v24,
@@ -16,4 +16,4 @@ entry:
ret i64 %result
}
-declare i64 @llvm.experimental.patchpoint.i64(i64, i32, i8*, i32, ...)
+declare i64 @llvm.experimental.patchpoint.i64(i64, i32, ptr, i32, ...)
diff --git a/llvm/test/CodeGen/AArch64/arm64-anyregcc.ll b/llvm/test/CodeGen/AArch64/arm64-anyregcc.ll
index 33bbfa2d81d9f..225d4c602f181 100644
--- a/llvm/test/CodeGen/AArch64/arm64-anyregcc.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-anyregcc.ll
@@ -69,7 +69,7 @@
; CHECK-NEXT: .long 3
define i64 @test() nounwind ssp uwtable {
entry:
- call anyregcc void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 0, i32 16, i8* null, i32 2, i32 1, i32 2, i64 3)
+ call anyregcc void (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.void(i64 0, i32 16, ptr null, i32 2, i32 1, i32 2, i64 3)
ret i64 0
}
@@ -92,10 +92,10 @@ entry:
; CHECK-NEXT: .short {{[0-9]+}}
; CHECK-NEXT: .short 0
; CHECK-NEXT: .long 0
-define i64 @property_access1(i8* %obj) nounwind ssp uwtable {
+define i64 @property_access1(ptr %obj) nounwind ssp uwtable {
entry:
- %f = inttoptr i64 281474417671919 to i8*
- %ret = call anyregcc i64 (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.i64(i64 1, i32 20, i8* %f, i32 1, i8* %obj)
+ %f = inttoptr i64 281474417671919 to ptr
+ %ret = call anyregcc i64 (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.i64(i64 1, i32 20, ptr %f, i32 1, ptr %obj)
ret i64 %ret
}
@@ -121,8 +121,8 @@ entry:
define i64 @property_access2() nounwind ssp uwtable {
entry:
%obj = alloca i64, align 8
- %f = inttoptr i64 281474417671919 to i8*
- %ret = call anyregcc i64 (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.i64(i64 2, i32 20, i8* %f, i32 1, i64* %obj)
+ %f = inttoptr i64 281474417671919 to ptr
+ %ret = call anyregcc i64 (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.i64(i64 2, i32 20, ptr %f, i32 1, ptr %obj)
ret i64 %ret
}
@@ -148,8 +148,8 @@ entry:
define i64 @property_access3() nounwind ssp uwtable {
entry:
%obj = alloca i64, align 8
- %f = inttoptr i64 281474417671919 to i8*
- %ret = call anyregcc i64 (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.i64(i64 3, i32 20, i8* %f, i32 0, i64* %obj)
+ %f = inttoptr i64 281474417671919 to ptr
+ %ret = call anyregcc i64 (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.i64(i64 3, i32 20, ptr %f, i32 0, ptr %obj)
ret i64 %ret
}
@@ -256,10 +256,10 @@ entry:
; CHECK-NEXT: .short {{[0-9]+}}
; CHECK-NEXT: .short 0
; CHECK-NEXT: .long 0
-define i64 @anyreg_test1(i8* %a1, i8* %a2, i8* %a3, i8* %a4, i8* %a5, i8* %a6, i8* %a7, i8* %a8, i8* %a9, i8* %a10, i8* %a11, i8* %a12, i8* %a13) nounwind ssp uwtable {
+define i64 @anyreg_test1(ptr %a1, ptr %a2, ptr %a3, ptr %a4, ptr %a5, ptr %a6, ptr %a7, ptr %a8, ptr %a9, ptr %a10, ptr %a11, ptr %a12, ptr %a13) nounwind ssp uwtable {
entry:
- %f = inttoptr i64 281474417671919 to i8*
- %ret = call anyregcc i64 (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.i64(i64 4, i32 20, i8* %f, i32 13, i8* %a1, i8* %a2, i8* %a3, i8* %a4, i8* %a5, i8* %a6, i8* %a7, i8* %a8, i8* %a9, i8* %a10, i8* %a11, i8* %a12, i8* %a13)
+ %f = inttoptr i64 281474417671919 to ptr
+ %ret = call anyregcc i64 (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.i64(i64 4, i32 20, ptr %f, i32 13, ptr %a1, ptr %a2, ptr %a3, ptr %a4, ptr %a5, ptr %a6, ptr %a7, ptr %a8, ptr %a9, ptr %a10, ptr %a11, ptr %a12, ptr %a13)
ret i64 %ret
}
@@ -366,10 +366,10 @@ entry:
; CHECK-NEXT: .short {{[0-9]+}}
; CHECK-NEXT: .short 0
; CHECK-NEXT: .long 0
-define i64 @anyreg_test2(i8* %a1, i8* %a2, i8* %a3, i8* %a4, i8* %a5, i8* %a6, i8* %a7, i8* %a8, i8* %a9, i8* %a10, i8* %a11, i8* %a12, i8* %a13) nounwind ssp uwtable {
+define i64 @anyreg_test2(ptr %a1, ptr %a2, ptr %a3, ptr %a4, ptr %a5, ptr %a6, ptr %a7, ptr %a8, ptr %a9, ptr %a10, ptr %a11, ptr %a12, ptr %a13) nounwind ssp uwtable {
entry:
- %f = inttoptr i64 281474417671919 to i8*
- %ret = call anyregcc i64 (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.i64(i64 5, i32 20, i8* %f, i32 8, i8* %a1, i8* %a2, i8* %a3, i8* %a4, i8* %a5, i8* %a6, i8* %a7, i8* %a8, i8* %a9, i8* %a10, i8* %a11, i8* %a12, i8* %a13)
+ %f = inttoptr i64 281474417671919 to ptr
+ %ret = call anyregcc i64 (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.i64(i64 5, i32 20, ptr %f, i32 8, ptr %a1, ptr %a2, ptr %a3, ptr %a4, ptr %a5, ptr %a6, ptr %a7, ptr %a8, ptr %a9, ptr %a10, ptr %a11, ptr %a12, ptr %a13)
ret i64 %ret
}
@@ -403,7 +403,7 @@ entry:
; CHECK-NEXT: .long 0
define i64 @patchpoint_spilldef(i64 %p1, i64 %p2, i64 %p3, i64 %p4) {
entry:
- %result = tail call anyregcc i64 (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.i64(i64 12, i32 16, i8* inttoptr (i64 0 to i8*), i32 2, i64 %p1, i64 %p2)
+ %result = tail call anyregcc i64 (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.i64(i64 12, i32 16, ptr inttoptr (i64 0 to ptr), i32 2, i64 %p1, i64 %p2)
tail call void asm sideeffect "nop", "~{x0},~{x1},~{x2},~{x3},~{x4},~{x5},~{x6},~{x7},~{x8},~{x9},~{x10},~{x11},~{x12},~{x13},~{x14},~{x15},~{x16},~{x17},~{x18},~{x19},~{x20},~{x21},~{x22},~{x23},~{x24},~{x25},~{x26},~{x27},~{x28},~{x29},~{x30},~{x31}"() nounwind
ret i64 %result
}
@@ -453,9 +453,9 @@ entry:
define i64 @patchpoint_spillargs(i64 %p1, i64 %p2, i64 %p3, i64 %p4) {
entry:
tail call void asm sideeffect "nop", "~{x0},~{x1},~{x2},~{x3},~{x4},~{x5},~{x6},~{x7},~{x8},~{x9},~{x10},~{x11},~{x12},~{x13},~{x14},~{x15},~{x16},~{x17},~{x18},~{x19},~{x20},~{x21},~{x22},~{x23},~{x24},~{x25},~{x26},~{x27},~{x28},~{x29},~{x30},~{x31}"() nounwind
- %result = tail call anyregcc i64 (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.i64(i64 13, i32 16, i8* inttoptr (i64 0 to i8*), i32 2, i64 %p1, i64 %p2, i64 %p3, i64 %p4)
+ %result = tail call anyregcc i64 (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.i64(i64 13, i32 16, ptr inttoptr (i64 0 to ptr), i32 2, i64 %p1, i64 %p2, i64 %p3, i64 %p4)
ret i64 %result
}
-declare void @llvm.experimental.patchpoint.void(i64, i32, i8*, i32, ...)
-declare i64 @llvm.experimental.patchpoint.i64(i64, i32, i8*, i32, ...)
+declare void @llvm.experimental.patchpoint.void(i64, i32, ptr, i32, ...)
+declare i64 @llvm.experimental.patchpoint.i64(i64, i32, ptr, i32, ...)
diff --git a/llvm/test/CodeGen/AArch64/arm64-patchpoint-scratch-regs.ll b/llvm/test/CodeGen/AArch64/arm64-patchpoint-scratch-regs.ll
index 2651f119412bd..04bfe9b4d9d71 100644
--- a/llvm/test/CodeGen/AArch64/arm64-patchpoint-scratch-regs.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-patchpoint-scratch-regs.ll
@@ -6,13 +6,13 @@
; CHECK-NEXT: mov x{{[0-9]+}}, x17
; CHECK-NEXT: Ltmp
; CHECK-NEXT: nop
-define void @clobberScratch(i32* %p) {
- %v = load i32, i32* %p
+define void @clobberScratch(ptr %p) {
+ %v = load i32, ptr %p
tail call void asm sideeffect "nop", "~{x0},~{x1},~{x2},~{x3},~{x4},~{x5},~{x6},~{x7},~{x8},~{x9},~{x10},~{x11},~{x12},~{x13},~{x14},~{x15},~{x18},~{x19},~{x20},~{x21},~{x22},~{x23},~{x24},~{x25},~{x26},~{x27},~{x28},~{x29},~{x30},~{x31}"() nounwind
- tail call void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 5, i32 20, i8* null, i32 0, i32* %p, i32 %v)
- store i32 %v, i32* %p
+ tail call void (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.void(i64 5, i32 20, ptr null, i32 0, ptr %p, i32 %v)
+ store i32 %v, ptr %p
ret void
}
-declare void @llvm.experimental.patchpoint.void(i64, i32, i8*, i32, ...)
+declare void @llvm.experimental.patchpoint.void(i64, i32, ptr, i32, ...)
diff --git a/llvm/test/CodeGen/AArch64/arm64-patchpoint-webkit_jscc.ll b/llvm/test/CodeGen/AArch64/arm64-patchpoint-webkit_jscc.ll
index 45bdcaa36da21..d47c2178d7cec 100644
--- a/llvm/test/CodeGen/AArch64/arm64-patchpoint-webkit_jscc.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-patchpoint-webkit_jscc.ll
@@ -27,10 +27,10 @@ entry:
; FAST-NEXT: movk x16, #57005, lsl #16
; FAST-NEXT: movk x16, #48879
; FAST-NEXT: blr x16
- %resolveCall2 = inttoptr i64 281474417671919 to i8*
- %result = tail call webkit_jscc i64 (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.i64(i64 5, i32 20, i8* %resolveCall2, i32 2, i64 %p4, i64 %p2)
- %resolveCall3 = inttoptr i64 244837814038255 to i8*
- tail call webkit_jscc void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 6, i32 20, i8* %resolveCall3, i32 2, i64 %p4, i64 %result)
+ %resolveCall2 = inttoptr i64 281474417671919 to ptr
+ %result = tail call webkit_jscc i64 (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.i64(i64 5, i32 20, ptr %resolveCall2, i32 2, i64 %p4, i64 %p2)
+ %resolveCall3 = inttoptr i64 244837814038255 to ptr
+ tail call webkit_jscc void (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.void(i64 6, i32 20, ptr %resolveCall3, i32 2, i64 %p4, i64 %result)
ret void
}
@@ -61,8 +61,8 @@ entry:
; FAST-NEXT: movk x16, #57005, lsl #16
; FAST-NEXT: movk x16, #48879
; FAST-NEXT: blr x16
- %call = inttoptr i64 281474417671919 to i8*
- %result = call webkit_jscc i64 (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.i64(i64 7, i32 20, i8* %call, i32 6, i64 %callee, i64 2, i64 undef, i32 4, i32 undef, i64 6)
+ %call = inttoptr i64 281474417671919 to ptr
+ %result = call webkit_jscc i64 (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.i64(i64 7, i32 20, ptr %call, i32 6, i64 %callee, i64 2, i64 undef, i32 4, i32 undef, i64 6)
ret i64 %result
}
@@ -101,8 +101,8 @@ entry:
; FAST-NEXT: movk x16, #57005, lsl #16
; FAST-NEXT: movk x16, #48879
; FAST-NEXT: blr x16
- %call = inttoptr i64 281474417671919 to i8*
- %result = call webkit_jscc i64 (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.i64(i64 7, i32 20, i8* %call, i32 10, i64 %callee, i64 2, i64 undef, i32 4, i32 undef, i64 6, i32 undef, i32 8, i32 undef, i64 10)
+ %call = inttoptr i64 281474417671919 to ptr
+ %result = call webkit_jscc i64 (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.i64(i64 7, i32 20, ptr %call, i32 10, i64 %callee, i64 2, i64 undef, i32 4, i32 undef, i64 6, i32 undef, i32 8, i32 undef, i64 10)
ret i64 %result
}
@@ -114,5 +114,5 @@ define webkit_jscc zeroext i16 @test_i16(i16 zeroext %a, i16 zeroext %b) {
ret i16 %sum
}
-declare void @llvm.experimental.patchpoint.void(i64, i32, i8*, i32, ...)
-declare i64 @llvm.experimental.patchpoint.i64(i64, i32, i8*, i32, ...)
+declare void @llvm.experimental.patchpoint.void(i64, i32, ptr, i32, ...)
+declare i64 @llvm.experimental.patchpoint.i64(i64, i32, ptr, i32, ...)
diff --git a/llvm/test/CodeGen/AArch64/arm64-patchpoint.ll b/llvm/test/CodeGen/AArch64/arm64-patchpoint.ll
index 0111d841450df..a493cd796ed4d 100644
--- a/llvm/test/CodeGen/AArch64/arm64-patchpoint.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-patchpoint.ll
@@ -15,10 +15,10 @@ entry:
; CHECK-NEXT: movk x16, #51967
; CHECK-NEXT: blr x16
; CHECK: ret
- %resolveCall2 = inttoptr i64 244837814094590 to i8*
- %result = tail call i64 (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.i64(i64 2, i32 20, i8* %resolveCall2, i32 4, i64 %p1, i64 %p2, i64 %p3, i64 %p4)
- %resolveCall3 = inttoptr i64 244837814094591 to i8*
- tail call void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 3, i32 20, i8* %resolveCall3, i32 2, i64 %p1, i64 %result)
+ %resolveCall2 = inttoptr i64 244837814094590 to ptr
+ %result = tail call i64 (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.i64(i64 2, i32 20, ptr %resolveCall2, i32 4, i64 %p1, i64 %p2, i64 %p3, i64 %p4)
+ %resolveCall3 = inttoptr i64 244837814094591 to ptr
+ tail call void (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.void(i64 3, i32 20, ptr %resolveCall3, i32 2, i64 %p1, i64 %result)
ret i64 %result
}
@@ -37,10 +37,10 @@ entry:
define void @caller_meta_leaf() {
entry:
%metadata = alloca i64, i32 3, align 8
- store i64 11, i64* %metadata
- store i64 12, i64* %metadata
- store i64 13, i64* %metadata
- call void (i64, i32, ...) @llvm.experimental.stackmap(i64 4, i32 0, i64* %metadata)
+ store i64 11, ptr %metadata
+ store i64 12, ptr %metadata
+ store i64 13, ptr %metadata
+ call void (i64, i32, ...) @llvm.experimental.stackmap(i64 4, i32 0, ptr %metadata)
ret void
}
@@ -48,19 +48,19 @@ entry:
; <rdar:15390785> Assertion failed: (CI.getNumArgOperands() >= NumArgs + 4)
; There is no way to verify this, since it depends on memory allocation.
; But I think it's useful to include as a working example.
-define i64 @testLowerConstant(i64 %arg, i64 %tmp2, i64 %tmp10, i64* %tmp33, i64 %tmp79) {
+define i64 @testLowerConstant(i64 %arg, i64 %tmp2, i64 %tmp10, ptr %tmp33, i64 %tmp79) {
entry:
%tmp80 = add i64 %tmp79, -16
- %tmp81 = inttoptr i64 %tmp80 to i64*
- %tmp82 = load i64, i64* %tmp81, align 8
+ %tmp81 = inttoptr i64 %tmp80 to ptr
+ %tmp82 = load i64, ptr %tmp81, align 8
tail call void (i64, i32, ...) @llvm.experimental.stackmap(i64 14, i32 8, i64 %arg, i64 %tmp2, i64 %tmp10, i64 %tmp82)
- tail call void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 15, i32 32, i8* null, i32 3, i64 %arg, i64 %tmp10, i64 %tmp82)
- %tmp83 = load i64, i64* %tmp33, align 8
+ tail call void (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.void(i64 15, i32 32, ptr null, i32 3, i64 %arg, i64 %tmp10, i64 %tmp82)
+ %tmp83 = load i64, ptr %tmp33, align 8
%tmp84 = add i64 %tmp83, -24
- %tmp85 = inttoptr i64 %tmp84 to i64*
- %tmp86 = load i64, i64* %tmp85, align 8
+ %tmp85 = inttoptr i64 %tmp84 to ptr
+ %tmp86 = load i64, ptr %tmp85, align 8
tail call void (i64, i32, ...) @llvm.experimental.stackmap(i64 17, i32 8, i64 %arg, i64 %tmp10, i64 %tmp86)
- tail call void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 18, i32 32, i8* null, i32 3, i64 %arg, i64 %tmp10, i64 %tmp86)
+ tail call void (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.void(i64 18, i32 32, ptr null, i32 3, i64 %arg, i64 %tmp10, i64 %tmp86)
ret i64 10
}
@@ -76,11 +76,11 @@ entry:
; CHECK-NEXT: nop
; CHECK-NEXT: ldp
; CHECK-NEXT: ret
- %result = tail call i64 (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.i64(i64 5, i32 20, i8* null, i32 2, i64 %p1, i64 %p2)
+ %result = tail call i64 (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.i64(i64 5, i32 20, ptr null, i32 2, i64 %p1, i64 %p2)
ret void
}
declare void @llvm.experimental.stackmap(i64, i32, ...)
-declare void @llvm.experimental.patchpoint.void(i64, i32, i8*, i32, ...)
-declare i64 @llvm.experimental.patchpoint.i64(i64, i32, i8*, i32, ...)
+declare void @llvm.experimental.patchpoint.void(i64, i32, ptr, i32, ...)
+declare i64 @llvm.experimental.patchpoint.i64(i64, i32, ptr, i32, ...)
diff --git a/llvm/test/CodeGen/AArch64/arm64-stackmap.ll b/llvm/test/CodeGen/AArch64/arm64-stackmap.ll
index bc4ea434f31e2..5036f3f67f425 100644
--- a/llvm/test/CodeGen/AArch64/arm64-stackmap.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-stackmap.ll
@@ -114,8 +114,8 @@ target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
define void @constantargs() {
entry:
- %0 = inttoptr i64 244837814094590 to i8*
- tail call void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 1, i32 20, i8* %0, i32 0, i64 65535, i64 65536, i64 4294967295, i64 4294967296, i128 66, i128 4294967297)
+ %0 = inttoptr i64 244837814094590 to ptr
+ tail call void (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.void(i64 1, i32 20, ptr %0, i32 0, i64 65535, i64 65536, i64 4294967295, i64 4294967296, i128 66, i128 4294967297)
ret void
}
@@ -139,7 +139,7 @@ entry:
define void @osrinline(i64 %a, i64 %b) {
entry:
; Runtime void->void call.
- call void inttoptr (i64 244837814094590 to void ()*)()
+ call void inttoptr (i64 244837814094590 to ptr)()
; Followed by inline OSR patchpoint with 12-byte shadow and 2 live vars.
call void (i64, i32, ...) @llvm.experimental.stackmap(i64 3, i32 12, i64 %a, i64 %b)
ret void
@@ -170,8 +170,8 @@ entry:
br i1 %test, label %ret, label %cold
cold:
; OSR patchpoint with 12-byte nop-slide and 2 live vars.
- %thunk = inttoptr i64 244837814094590 to i8*
- call void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 4, i32 20, i8* %thunk, i32 0, i64 %a, i64 %b)
+ %thunk = inttoptr i64 244837814094590 to ptr
+ call void (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.void(i64 4, i32 20, ptr %thunk, i32 0, i64 %a, i64 %b)
unreachable
ret:
ret void
@@ -184,10 +184,10 @@ ret:
;
; FIXME: There are currently no stackmap entries. After moving to
; AnyRegCC, we will have entries for the object and return value.
-define i64 @propertyRead(i64* %obj) {
+define i64 @propertyRead(ptr %obj) {
entry:
- %resolveRead = inttoptr i64 244837814094590 to i8*
- %result = call i64 (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.i64(i64 5, i32 20, i8* %resolveRead, i32 1, i64* %obj)
+ %resolveRead = inttoptr i64 244837814094590 to ptr
+ %result = call i64 (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.i64(i64 5, i32 20, ptr %resolveRead, i32 1, ptr %obj)
%add = add i64 %result, 3
ret i64 %add
}
@@ -208,10 +208,10 @@ entry:
; CHECK-NEXT: .short {{[0-9]+}}
; CHECK-NEXT: .short 0
; CHECK-NEXT: .long 0
-define void @propertyWrite(i64 %dummy1, i64* %obj, i64 %dummy2, i64 %a) {
+define void @propertyWrite(i64 %dummy1, ptr %obj, i64 %dummy2, i64 %a) {
entry:
- %resolveWrite = inttoptr i64 244837814094590 to i8*
- call anyregcc void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 6, i32 20, i8* %resolveWrite, i32 2, i64* %obj, i64 %a)
+ %resolveWrite = inttoptr i64 244837814094590 to ptr
+ call anyregcc void (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.void(i64 6, i32 20, ptr %resolveWrite, i32 2, ptr %obj, i64 %a)
ret void
}
@@ -234,10 +234,10 @@ entry:
; CHECK-NEXT: .short {{[0-9]+}}
; CHECK-NEXT: .short 0
; CHECK-NEXT: .long 0
-define void @jsVoidCall(i64 %dummy1, i64* %obj, i64 %arg, i64 %l1, i64 %l2) {
+define void @jsVoidCall(i64 %dummy1, ptr %obj, i64 %arg, i64 %l1, i64 %l2) {
entry:
- %resolveCall = inttoptr i64 244837814094590 to i8*
- call void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 7, i32 20, i8* %resolveCall, i32 2, i64* %obj, i64 %arg, i64 %l1, i64 %l2)
+ %resolveCall = inttoptr i64 244837814094590 to ptr
+ call void (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.void(i64 7, i32 20, ptr %resolveCall, i32 2, ptr %obj, i64 %arg, i64 %l1, i64 %l2)
ret void
}
@@ -260,10 +260,10 @@ entry:
; CHECK-NEXT: .short {{[0-9]+}}
; CHECK-NEXT: .short 0
; CHECK-NEXT: .long 0
-define i64 @jsIntCall(i64 %dummy1, i64* %obj, i64 %arg, i64 %l1, i64 %l2) {
+define i64 @jsIntCall(i64 %dummy1, ptr %obj, i64 %arg, i64 %l1, i64 %l2) {
entry:
- %resolveCall = inttoptr i64 244837814094590 to i8*
- %result = call i64 (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.i64(i64 8, i32 20, i8* %resolveCall, i32 2, i64* %obj, i64 %arg, i64 %l1, i64 %l2)
+ %resolveCall = inttoptr i64 244837814094590 to ptr
+ %result = call i64 (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.i64(i64 8, i32 20, ptr %resolveCall, i32 2, ptr %obj, i64 %arg, i64 %l1, i64 %l2)
%add = add i64 %result, 3
ret i64 %add
}
@@ -286,7 +286,7 @@ entry:
; CHECK-NEXT: .long
define void @spilledValue(i64 %arg0, i64 %arg1, i64 %arg2, i64 %arg3, i64 %arg4, i64 %l0, i64 %l1, i64 %l2, i64 %l3, i64 %l4, i64 %l5, i64 %l6, i64 %l7, i64 %l8, i64 %l9, i64 %l10, i64 %l11, i64 %l12, i64 %l13, i64 %l14, i64 %l15, i64 %l16, i64 %l17, i64 %l18, i64 %l19, i64 %l20, i64 %l21, i64 %l22, i64 %l23, i64 %l24, i64 %l25, i64 %l26, i64 %l27) {
entry:
- call void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 11, i32 20, i8* null, i32 5, i64 %arg0, i64 %arg1, i64 %arg2, i64 %arg3, i64 %arg4, i64 %l0, i64 %l1, i64 %l2, i64 %l3, i64 %l4, i64 %l5, i64 %l6, i64 %l7, i64 %l8, i64 %l9, i64 %l10, i64 %l11, i64 %l12, i64 %l13, i64 %l14, i64 %l15, i64 %l16, i64 %l17, i64 %l18, i64 %l19, i64 %l20, i64 %l21, i64 %l22, i64 %l23, i64 %l24, i64 %l25, i64 %l26, i64 %l27)
+ call void (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.void(i64 11, i32 20, ptr null, i32 5, i64 %arg0, i64 %arg1, i64 %arg2, i64 %arg3, i64 %arg4, i64 %l0, i64 %l1, i64 %l2, i64 %l3, i64 %l4, i64 %l5, i64 %l6, i64 %l7, i64 %l8, i64 %l9, i64 %l10, i64 %l11, i64 %l12, i64 %l13, i64 %l14, i64 %l15, i64 %l16, i64 %l17, i64 %l18, i64 %l19, i64 %l20, i64 %l21, i64 %l22, i64 %l23, i64 %l24, i64 %l25, i64 %l26, i64 %l27)
ret void
}
@@ -401,10 +401,10 @@ define void @floats(float %f, double %g) {
%ff = alloca float
%gg = alloca double
call void (i64, i32, ...) @llvm.experimental.stackmap(i64 888, i32 0, float 1.25,
- double 1.5, float %f, double %g, float* %ff, double* %gg)
+ double 1.5, float %f, double %g, ptr %ff, ptr %gg)
ret void
}
declare void @llvm.experimental.stackmap(i64, i32, ...)
-declare void @llvm.experimental.patchpoint.void(i64, i32, i8*, i32, ...)
-declare i64 @llvm.experimental.patchpoint.i64(i64, i32, i8*, i32, ...)
+declare void @llvm.experimental.patchpoint.void(i64, i32, ptr, i32, ...)
+declare i64 @llvm.experimental.patchpoint.i64(i64, i32, ptr, i32, ...)
diff --git a/llvm/test/CodeGen/AArch64/stackmap-liveness.ll b/llvm/test/CodeGen/AArch64/stackmap-liveness.ll
index b66dbfae6c8a4..e1f9ffe42a77c 100644
--- a/llvm/test/CodeGen/AArch64/stackmap-liveness.ll
+++ b/llvm/test/CodeGen/AArch64/stackmap-liveness.ll
@@ -40,8 +40,8 @@ define i64 @stackmap_liveness(i1 %c) {
; Align
; CHECK-NEXT: .p2align 3
%1 = select i1 %c, i64 1, i64 2
- call anyregcc void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 1, i32 32, i8* null, i32 0)
+ call anyregcc void (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.void(i64 1, i32 32, ptr null, i32 0)
ret i64 %1
}
-declare void @llvm.experimental.patchpoint.void(i64, i32, i8*, i32, ...)
+declare void @llvm.experimental.patchpoint.void(i64, i32, ptr, i32, ...)
diff --git a/llvm/test/CodeGen/AArch64/stackmap.ll b/llvm/test/CodeGen/AArch64/stackmap.ll
index e89f73a303b27..ab238f732671a 100644
--- a/llvm/test/CodeGen/AArch64/stackmap.ll
+++ b/llvm/test/CodeGen/AArch64/stackmap.ll
@@ -176,8 +176,8 @@
define void @constantargs() {
entry:
- %0 = inttoptr i64 12345 to i8*
- tail call void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 1, i32 16, i8* %0, i32 0, i16 65535, i16 -1, i32 65536, i32 2000000000, i32 2147483647, i32 -1, i32 4294967295, i32 4294967296, i64 2147483648, i64 4294967295, i64 4294967296, i64 -1, i128 66, i128 4294967297)
+ %0 = inttoptr i64 12345 to ptr
+ tail call void (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.void(i64 1, i32 16, ptr %0, i32 0, i16 65535, i16 -1, i32 65536, i32 2000000000, i32 2147483647, i32 -1, i32 4294967295, i32 4294967296, i64 2147483648, i64 4294967295, i64 4294967296, i64 -1, i128 66, i128 4294967297)
ret void
}
@@ -201,7 +201,7 @@ entry:
define void @osrinline(i64 %a, i64 %b) {
entry:
; Runtime void->void call.
- call void inttoptr (i64 -559038737 to void ()*)()
+ call void inttoptr (i64 -559038737 to ptr)()
; Followed by inline OSR patchpoint with 12-byte shadow and 2 live vars.
call void (i64, i32, ...) @llvm.experimental.stackmap(i64 3, i32 12, i64 %a, i64 %b)
ret void
@@ -232,8 +232,8 @@ entry:
br i1 %test, label %ret, label %cold
cold:
; OSR patchpoint with 12-byte nop-slide and 2 live vars.
- %thunk = inttoptr i64 3735928559 to i8*
- call void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 4, i32 16, i8* %thunk, i32 0, i64 %a, i64 %b)
+ %thunk = inttoptr i64 3735928559 to ptr
+ call void (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.void(i64 4, i32 16, ptr %thunk, i32 0, i64 %a, i64 %b)
unreachable
ret:
ret void
@@ -255,10 +255,10 @@ ret:
; CHECK-NEXT: .hword {{[0-9]+}}
; CHECK-NEXT: .hword 0
; CHECK-NEXT: .word 0
-define i64 @propertyRead(i64* %obj) {
+define i64 @propertyRead(ptr %obj) {
entry:
- %resolveRead = inttoptr i64 3735928559 to i8*
- %result = call anyregcc i64 (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.i64(i64 5, i32 16, i8* %resolveRead, i32 1, i64* %obj)
+ %resolveRead = inttoptr i64 3735928559 to ptr
+ %result = call anyregcc i64 (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.i64(i64 5, i32 16, ptr %resolveRead, i32 1, ptr %obj)
%add = add i64 %result, 3
ret i64 %add
}
@@ -279,10 +279,10 @@ entry:
; CHECK-NEXT: .hword {{[0-9]+}}
; CHECK-NEXT: .hword 0
; CHECK-NEXT: .word 0
-define void @propertyWrite(i64 %dummy1, i64* %obj, i64 %dummy2, i64 %a) {
+define void @propertyWrite(i64 %dummy1, ptr %obj, i64 %dummy2, i64 %a) {
entry:
- %resolveWrite = inttoptr i64 3735928559 to i8*
- call anyregcc void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 6, i32 16, i8* %resolveWrite, i32 2, i64* %obj, i64 %a)
+ %resolveWrite = inttoptr i64 3735928559 to ptr
+ call anyregcc void (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.void(i64 6, i32 16, ptr %resolveWrite, i32 2, ptr %obj, i64 %a)
ret void
}
@@ -305,10 +305,10 @@ entry:
; CHECK-NEXT: .hword {{[0-9]+}}
; CHECK-NEXT: .hword 0
; CHECK-NEXT: .word 0
-define void @jsVoidCall(i64 %dummy1, i64* %obj, i64 %arg, i64 %l1, i64 %l2) {
+define void @jsVoidCall(i64 %dummy1, ptr %obj, i64 %arg, i64 %l1, i64 %l2) {
entry:
- %resolveCall = inttoptr i64 3735928559 to i8*
- call void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 7, i32 16, i8* %resolveCall, i32 2, i64* %obj, i64 %arg, i64 %l1, i64 %l2)
+ %resolveCall = inttoptr i64 3735928559 to ptr
+ call void (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.void(i64 7, i32 16, ptr %resolveCall, i32 2, ptr %obj, i64 %arg, i64 %l1, i64 %l2)
ret void
}
@@ -331,10 +331,10 @@ entry:
; CHECK-NEXT: .hword {{[0-9]+}}
; CHECK-NEXT: .hword 0
; CHECK-NEXT: .word 0
-define i64 @jsIntCall(i64 %dummy1, i64* %obj, i64 %arg, i64 %l1, i64 %l2) {
+define i64 @jsIntCall(i64 %dummy1, ptr %obj, i64 %arg, i64 %l1, i64 %l2) {
entry:
- %resolveCall = inttoptr i64 3735928559 to i8*
- %result = call i64 (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.i64(i64 8, i32 16, i8* %resolveCall, i32 2, i64* %obj, i64 %arg, i64 %l1, i64 %l2)
+ %resolveCall = inttoptr i64 3735928559 to ptr
+ %result = call i64 (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.i64(i64 8, i32 16, ptr %resolveCall, i32 2, ptr %obj, i64 %arg, i64 %l1, i64 %l2)
%add = add i64 %result, 3
ret i64 %add
}
@@ -357,7 +357,7 @@ entry:
; CHECK-NEXT: .word
define void @spilledValue(i64 %arg0, i64 %arg1, i64 %arg2, i64 %arg3, i64 %arg4, i64 %l0, i64 %l1, i64 %l2, i64 %l3, i64 %l4, i64 %l5, i64 %l6, i64 %l7, i64 %l8, i64 %l9, i64 %l10, i64 %l11, i64 %l12, i64 %l13, i64 %l14, i64 %l15, i64 %l16, i64 %l17, i64 %l18, i64 %l19, i64 %l20, i64 %l21, i64 %l22, i64 %l23, i64 %l24, i64 %l25, i64 %l26, i64 %l27) {
entry:
- call void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 11, i32 20, i8* null, i32 5, i64 %arg0, i64 %arg1, i64 %arg2, i64 %arg3, i64 %arg4, i64 %l0, i64 %l1, i64 %l2, i64 %l3, i64 %l4, i64 %l5, i64 %l6, i64 %l7, i64 %l8, i64 %l9, i64 %l10, i64 %l11, i64 %l12, i64 %l13, i64 %l14, i64 %l15, i64 %l16, i64 %l17, i64 %l18, i64 %l19, i64 %l20, i64 %l21, i64 %l22, i64 %l23, i64 %l24, i64 %l25, i64 %l26, i64 %l27)
+ call void (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.void(i64 11, i32 20, ptr null, i32 5, i64 %arg0, i64 %arg1, i64 %arg2, i64 %arg3, i64 %arg4, i64 %l0, i64 %l1, i64 %l2, i64 %l3, i64 %l4, i64 %l5, i64 %l6, i64 %l7, i64 %l8, i64 %l9, i64 %l10, i64 %l11, i64 %l12, i64 %l13, i64 %l14, i64 %l15, i64 %l16, i64 %l17, i64 %l18, i64 %l19, i64 %l20, i64 %l21, i64 %l22, i64 %l23, i64 %l24, i64 %l25, i64 %l26, i64 %l27)
ret void
}
@@ -439,13 +439,13 @@ define void @liveConstant() {
define void @directFrameIdx() {
entry:
%metadata1 = alloca i64, i32 3, align 8
- store i64 11, i64* %metadata1
- store i64 12, i64* %metadata1
- store i64 13, i64* %metadata1
- call void (i64, i32, ...) @llvm.experimental.stackmap(i64 16, i32 0, i64* %metadata1)
+ store i64 11, ptr %metadata1
+ store i64 12, ptr %metadata1
+ store i64 13, ptr %metadata1
+ call void (i64, i32, ...) @llvm.experimental.stackmap(i64 16, i32 0, ptr %metadata1)
%metadata2 = alloca i8, i32 4, align 8
%metadata3 = alloca i16, i32 4, align 8
- call void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 17, i32 4, i8* null, i32 0, i8* %metadata2, i16* %metadata3)
+ call void (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.void(i64 17, i32 4, ptr null, i32 0, ptr %metadata2, ptr %metadata3)
ret void
}
@@ -461,10 +461,10 @@ entry:
; CHECK-LABEL: .word .L{{.*}}-longid
define void @longid() {
entry:
- tail call void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 4294967295, i32 0, i8* null, i32 0)
- tail call void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 4294967296, i32 0, i8* null, i32 0)
- tail call void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 9223372036854775807, i32 0, i8* null, i32 0)
- tail call void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 -1, i32 0, i8* null, i32 0)
+ tail call void (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.void(i64 4294967295, i32 0, ptr null, i32 0)
+ tail call void (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.void(i64 4294967296, i32 0, ptr null, i32 0)
+ tail call void (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.void(i64 9223372036854775807, i32 0, ptr null, i32 0)
+ tail call void (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.void(i64 -1, i32 0, ptr null, i32 0)
ret void
}
@@ -496,7 +496,7 @@ define void @clobberLR(i32 %a) {
; CHECK-NEXT: .hword 0
define void @needsStackRealignment() {
%val = alloca i64, i32 3, align 128
- tail call void (...) @escape_values(i64* %val)
+ tail call void (...) @escape_values(ptr %val)
; Note: Adding any non-constant to the stackmap would fail because we
; expected to be able to address off the frame pointer. In a realigned
; frame, we must use the stack pointer instead. This is a separate bug.
@@ -555,10 +555,10 @@ define void @floats(float %f, double %g) {
%ff = alloca float
%gg = alloca double
call void (i64, i32, ...) @llvm.experimental.stackmap(i64 888, i32 0, float 1.25,
- double 1.5, float %f, double %g, float* %ff, double* %gg)
+ double 1.5, float %f, double %g, ptr %ff, ptr %gg)
ret void
}
declare void @llvm.experimental.stackmap(i64, i32, ...)
-declare void @llvm.experimental.patchpoint.void(i64, i32, i8*, i32, ...)
-declare i64 @llvm.experimental.patchpoint.i64(i64, i32, i8*, i32, ...)
+declare void @llvm.experimental.patchpoint.void(i64, i32, ptr, i32, ...)
+declare i64 @llvm.experimental.patchpoint.i64(i64, i32, ptr, i32, ...)
diff --git a/llvm/test/CodeGen/MIR/X86/liveout-register-mask.mir b/llvm/test/CodeGen/MIR/X86/liveout-register-mask.mir
index 956a436a8557c..69a5fd437085e 100644
--- a/llvm/test/CodeGen/MIR/X86/liveout-register-mask.mir
+++ b/llvm/test/CodeGen/MIR/X86/liveout-register-mask.mir
@@ -6,11 +6,11 @@
define void @small_patchpoint_codegen(i64 %p1, i64 %p2, i64 %p3, i64 %p4) {
entry:
- %result = tail call i64 (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.i64(i64 5, i32 5, i8* null, i32 2, i64 %p1, i64 %p2)
+ %result = tail call i64 (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.i64(i64 5, i32 5, ptr null, i32 2, i64 %p1, i64 %p2)
ret void
}
- declare i64 @llvm.experimental.patchpoint.i64(i64, i32, i8*, i32, ...)
+ declare i64 @llvm.experimental.patchpoint.i64(i64, i32, ptr, i32, ...)
...
---
diff --git a/llvm/test/CodeGen/PowerPC/ppc64-anyregcc-crash.ll b/llvm/test/CodeGen/PowerPC/ppc64-anyregcc-crash.ll
index e1ffe913adbaf..a7d372c5e61a2 100644
--- a/llvm/test/CodeGen/PowerPC/ppc64-anyregcc-crash.ll
+++ b/llvm/test/CodeGen/PowerPC/ppc64-anyregcc-crash.ll
@@ -8,7 +8,7 @@ define i64 @anyreglimit(i64 %v1, i64 %v2, i64 %v3, i64 %v4, i64 %v5, i64 %v6, i6
i64 %v17, i64 %v18, i64 %v19, i64 %v20, i64 %v21, i64 %v22, i64 %v23, i64 %v24,
i64 %v25, i64 %v26, i64 %v27, i64 %v28, i64 %v29, i64 %v30, i64 %v31, i64 %v32) {
entry:
- %result = tail call anyregcc i64 (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.i64(i64 12, i32 16, i8* inttoptr (i64 0 to i8*), i32 32,
+ %result = tail call anyregcc i64 (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.i64(i64 12, i32 16, ptr inttoptr (i64 0 to ptr), i32 32,
i64 %v1, i64 %v2, i64 %v3, i64 %v4, i64 %v5, i64 %v6, i64 %v7, i64 %v8,
i64 %v9, i64 %v10, i64 %v11, i64 %v12, i64 %v13, i64 %v14, i64 %v15, i64 %v16,
i64 %v17, i64 %v18, i64 %v19, i64 %v20, i64 %v21, i64 %v22, i64 %v23, i64 %v24,
@@ -16,4 +16,4 @@ entry:
ret i64 %result
}
-declare i64 @llvm.experimental.patchpoint.i64(i64, i32, i8*, i32, ...)
+declare i64 @llvm.experimental.patchpoint.i64(i64, i32, ptr, i32, ...)
diff --git a/llvm/test/CodeGen/PowerPC/ppc64-anyregcc.ll b/llvm/test/CodeGen/PowerPC/ppc64-anyregcc.ll
index b8c62c3f9362e..9b48fb72dc7cf 100644
--- a/llvm/test/CodeGen/PowerPC/ppc64-anyregcc.ll
+++ b/llvm/test/CodeGen/PowerPC/ppc64-anyregcc.ll
@@ -96,7 +96,7 @@ target triple = "powerpc64-unknown-linux-gnu"
; CHECK-NEXT: .long 3
define i64 @test() nounwind ssp uwtable {
entry:
- call anyregcc void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 0, i32 40, i8* null, i32 2, i32 1, i32 2, i64 3)
+ call anyregcc void (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.void(i64 0, i32 40, ptr null, i32 2, i32 1, i32 2, i64 3)
ret i64 0
}
@@ -119,10 +119,10 @@ entry:
; CHECK-NEXT: .short {{[0-9]+}}
; CHECK-NEXT: .short 0
; CHECK-NEXT: .long 0
-define i64 @property_access1(i8* %obj) nounwind ssp uwtable {
+define i64 @property_access1(ptr %obj) nounwind ssp uwtable {
entry:
- %f = inttoptr i64 281474417671919 to i8*
- %ret = call anyregcc i64 (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.i64(i64 1, i32 40, i8* %f, i32 1, i8* %obj)
+ %f = inttoptr i64 281474417671919 to ptr
+ %ret = call anyregcc i64 (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.i64(i64 1, i32 40, ptr %f, i32 1, ptr %obj)
ret i64 %ret
}
@@ -148,8 +148,8 @@ entry:
define i64 @property_access2() nounwind ssp uwtable {
entry:
%obj = alloca i64, align 8
- %f = inttoptr i64 281474417671919 to i8*
- %ret = call anyregcc i64 (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.i64(i64 2, i32 40, i8* %f, i32 1, i64* %obj)
+ %f = inttoptr i64 281474417671919 to ptr
+ %ret = call anyregcc i64 (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.i64(i64 2, i32 40, ptr %f, i32 1, ptr %obj)
ret i64 %ret
}
@@ -175,8 +175,8 @@ entry:
define i64 @property_access3() nounwind ssp uwtable {
entry:
%obj = alloca i64, align 8
- %f = inttoptr i64 281474417671919 to i8*
- %ret = call anyregcc i64 (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.i64(i64 3, i32 40, i8* %f, i32 0, i64* %obj)
+ %f = inttoptr i64 281474417671919 to ptr
+ %ret = call anyregcc i64 (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.i64(i64 3, i32 40, ptr %f, i32 0, ptr %obj)
ret i64 %ret
}
@@ -283,10 +283,10 @@ entry:
; CHECK-NEXT: .short {{[0-9]+}}
; CHECK-NEXT: .short 0
; CHECK-NEXT: .long 0
-define i64 @anyreg_test1(i8* %a1, i8* %a2, i8* %a3, i8* %a4, i8* %a5, i8* %a6, i8* %a7, i8* %a8, i8* %a9, i8* %a10, i8* %a11, i8* %a12, i8* %a13) nounwind ssp uwtable {
+define i64 @anyreg_test1(ptr %a1, ptr %a2, ptr %a3, ptr %a4, ptr %a5, ptr %a6, ptr %a7, ptr %a8, ptr %a9, ptr %a10, ptr %a11, ptr %a12, ptr %a13) nounwind ssp uwtable {
entry:
- %f = inttoptr i64 281474417671919 to i8*
- %ret = call anyregcc i64 (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.i64(i64 4, i32 40, i8* %f, i32 13, i8* %a1, i8* %a2, i8* %a3, i8* %a4, i8* %a5, i8* %a6, i8* %a7, i8* %a8, i8* %a9, i8* %a10, i8* %a11, i8* %a12, i8* %a13)
+ %f = inttoptr i64 281474417671919 to ptr
+ %ret = call anyregcc i64 (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.i64(i64 4, i32 40, ptr %f, i32 13, ptr %a1, ptr %a2, ptr %a3, ptr %a4, ptr %a5, ptr %a6, ptr %a7, ptr %a8, ptr %a9, ptr %a10, ptr %a11, ptr %a12, ptr %a13)
ret i64 %ret
}
@@ -393,10 +393,10 @@ entry:
; CHECK-NEXT: .short {{[0-9]+}}
; CHECK-NEXT: .short 0
; CHECK-NEXT: .long 0
-define i64 @anyreg_test2(i8* %a1, i8* %a2, i8* %a3, i8* %a4, i8* %a5, i8* %a6, i8* %a7, i8* %a8, i8* %a9, i8* %a10, i8* %a11, i8* %a12, i8* %a13) nounwind ssp uwtable {
+define i64 @anyreg_test2(ptr %a1, ptr %a2, ptr %a3, ptr %a4, ptr %a5, ptr %a6, ptr %a7, ptr %a8, ptr %a9, ptr %a10, ptr %a11, ptr %a12, ptr %a13) nounwind ssp uwtable {
entry:
- %f = inttoptr i64 281474417671919 to i8*
- %ret = call anyregcc i64 (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.i64(i64 5, i32 40, i8* %f, i32 8, i8* %a1, i8* %a2, i8* %a3, i8* %a4, i8* %a5, i8* %a6, i8* %a7, i8* %a8, i8* %a9, i8* %a10, i8* %a11, i8* %a12, i8* %a13)
+ %f = inttoptr i64 281474417671919 to ptr
+ %ret = call anyregcc i64 (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.i64(i64 5, i32 40, ptr %f, i32 8, ptr %a1, ptr %a2, ptr %a3, ptr %a4, ptr %a5, ptr %a6, ptr %a7, ptr %a8, ptr %a9, ptr %a10, ptr %a11, ptr %a12, ptr %a13)
ret i64 %ret
}
@@ -430,7 +430,7 @@ entry:
; CHECK-NEXT: .long 0
define i64 @patchpoint_spilldef(i64 %p1, i64 %p2, i64 %p3, i64 %p4) {
entry:
- %result = tail call anyregcc i64 (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.i64(i64 12, i32 40, i8* inttoptr (i64 0 to i8*), i32 2, i64 %p1, i64 %p2)
+ %result = tail call anyregcc i64 (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.i64(i64 12, i32 40, ptr inttoptr (i64 0 to ptr), i32 2, i64 %p1, i64 %p2)
tail call void asm sideeffect "nop", "~{r0},~{r3},~{r4},~{r5},~{r6},~{r7},~{r8},~{r9},~{r10},~{r11},~{r12},~{r14},~{r15},~{r16},~{r17
},~{r18},~{r19},~{r20},~{r21},~{r22},~{r23},~{r24},~{r25},~{r26},~{r27},~{r28},~{r29},~{r30},~{r31}"() nounwind
ret i64 %result
@@ -482,9 +482,9 @@ define i64 @patchpoint_spillargs(i64 %p1, i64 %p2, i64 %p3, i64 %p4) {
entry:
tail call void asm sideeffect "nop", "~{r0},~{r3},~{r4},~{r5},~{r6},~{r7},~{r8},~{r9},~{r10},~{r11},~{r12},~{r14},~{r15},~{r16},~{r17
},~{r18},~{r19},~{r20},~{r21},~{r22},~{r23},~{r24},~{r25},~{r26},~{r27},~{r28},~{r29},~{r30},~{r31}"() nounwind
- %result = tail call anyregcc i64 (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.i64(i64 13, i32 40, i8* inttoptr (i64 0 to i8*), i32 2, i64 %p1, i64 %p2, i64 %p3, i64 %p4)
+ %result = tail call anyregcc i64 (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.i64(i64 13, i32 40, ptr inttoptr (i64 0 to ptr), i32 2, i64 %p1, i64 %p2, i64 %p3, i64 %p4)
ret i64 %result
}
-declare void @llvm.experimental.patchpoint.void(i64, i32, i8*, i32, ...)
-declare i64 @llvm.experimental.patchpoint.i64(i64, i32, i8*, i32, ...)
+declare void @llvm.experimental.patchpoint.void(i64, i32, ptr, i32, ...)
+declare i64 @llvm.experimental.patchpoint.i64(i64, i32, ptr, i32, ...)
diff --git a/llvm/test/CodeGen/PowerPC/ppc64-patchpoint.ll b/llvm/test/CodeGen/PowerPC/ppc64-patchpoint.ll
index 7a6f5acb92148..a11afbb1a475d 100644
--- a/llvm/test/CodeGen/PowerPC/ppc64-patchpoint.ll
+++ b/llvm/test/CodeGen/PowerPC/ppc64-patchpoint.ll
@@ -39,10 +39,10 @@ entry:
; CHECK: blr
- %resolveCall2 = inttoptr i64 244837814094590 to i8*
- %result = tail call i64 (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.i64(i64 2, i32 40, i8* %resolveCall2, i32 4, i64 %p1, i64 %p2, i64 %p3, i64 %p4)
- %resolveCall3 = inttoptr i64 244837814094591 to i8*
- tail call void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 3, i32 40, i8* %resolveCall3, i32 2, i64 %p1, i64 %result)
+ %resolveCall2 = inttoptr i64 244837814094590 to ptr
+ %result = tail call i64 (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.i64(i64 2, i32 40, ptr %resolveCall2, i32 4, i64 %p1, i64 %p2, i64 %p3, i64 %p4)
+ %resolveCall3 = inttoptr i64 244837814094591 to ptr
+ tail call void (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.void(i64 3, i32 40, ptr %resolveCall3, i32 2, i64 %p1, i64 %result)
ret i64 %result
}
@@ -60,10 +60,10 @@ entry:
define void @caller_meta_leaf() {
entry:
%metadata = alloca i64, i32 3, align 8
- store i64 11, i64* %metadata
- store i64 12, i64* %metadata
- store i64 13, i64* %metadata
- call void (i64, i32, ...) @llvm.experimental.stackmap(i64 4, i32 0, i64* %metadata)
+ store i64 11, ptr %metadata
+ store i64 12, ptr %metadata
+ store i64 13, ptr %metadata
+ call void (i64, i32, ...) @llvm.experimental.stackmap(i64 4, i32 0, ptr %metadata)
ret void
}
@@ -71,19 +71,19 @@ entry:
; <rdar:15390785> Assertion failed: (CI.getNumArgOperands() >= NumArgs + 4)
; There is no way to verify this, since it depends on memory allocation.
; But I think it's useful to include as a working example.
-define i64 @testLowerConstant(i64 %arg, i64 %tmp2, i64 %tmp10, i64* %tmp33, i64 %tmp79) {
+define i64 @testLowerConstant(i64 %arg, i64 %tmp2, i64 %tmp10, ptr %tmp33, i64 %tmp79) {
entry:
%tmp80 = add i64 %tmp79, -16
- %tmp81 = inttoptr i64 %tmp80 to i64*
- %tmp82 = load i64, i64* %tmp81, align 8
+ %tmp81 = inttoptr i64 %tmp80 to ptr
+ %tmp82 = load i64, ptr %tmp81, align 8
tail call void (i64, i32, ...) @llvm.experimental.stackmap(i64 14, i32 8, i64 %arg, i64 %tmp2, i64 %tmp10, i64 %tmp82)
- tail call void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 15, i32 48, i8* null, i32 3, i64 %arg, i64 %tmp10, i64 %tmp82)
- %tmp83 = load i64, i64* %tmp33, align 8
+ tail call void (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.void(i64 15, i32 48, ptr null, i32 3, i64 %arg, i64 %tmp10, i64 %tmp82)
+ %tmp83 = load i64, ptr %tmp33, align 8
%tmp84 = add i64 %tmp83, -24
- %tmp85 = inttoptr i64 %tmp84 to i64*
- %tmp86 = load i64, i64* %tmp85, align 8
+ %tmp85 = inttoptr i64 %tmp84 to ptr
+ %tmp86 = load i64, ptr %tmp85, align 8
tail call void (i64, i32, ...) @llvm.experimental.stackmap(i64 17, i32 8, i64 %arg, i64 %tmp10, i64 %tmp86)
- tail call void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 18, i32 48, i8* null, i32 3, i64 %arg, i64 %tmp10, i64 %tmp86)
+ tail call void (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.void(i64 18, i32 48, ptr null, i32 3, i64 %arg, i64 %tmp10, i64 %tmp86)
ret i64 10
}
@@ -99,7 +99,7 @@ entry:
; CHECK-NEXT: nop
; CHECK-NOT: nop
; CHECK: blr
- %result = tail call i64 (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.i64(i64 5, i32 20, i8* null, i32 2, i64 %p1, i64 %p2)
+ %result = tail call i64 (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.i64(i64 5, i32 20, ptr null, i32 2, i64 %p1, i64 %p2)
ret void
}
@@ -114,11 +114,12 @@ entry:
; CHECK-NEXT: nop
; CHECK-NOT: nop
; CHECK: blr
- %result = tail call i64 (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.i64(i64 9, i32 12, i8* bitcast (i64 (i64, i64)* @foo to i8*), i32 2, i64 %p1, i64 %p2)
+ %result = tail call i64 (i64, i32, ptr, i32, ...)
+ @llvm.experimental.patchpoint.i64(i64 9, i32 12, ptr @foo, i32 2, i64 %p1, i64 %p2)
ret i64 %result
}
declare void @llvm.experimental.stackmap(i64, i32, ...)
-declare void @llvm.experimental.patchpoint.void(i64, i32, i8*, i32, ...)
-declare i64 @llvm.experimental.patchpoint.i64(i64, i32, i8*, i32, ...)
+declare void @llvm.experimental.patchpoint.void(i64, i32, ptr, i32, ...)
+declare i64 @llvm.experimental.patchpoint.i64(i64, i32, ptr, i32, ...)
diff --git a/llvm/test/CodeGen/PowerPC/ppc64-stackmap.ll b/llvm/test/CodeGen/PowerPC/ppc64-stackmap.ll
index 9cb1b4e562461..a75f225861034 100644
--- a/llvm/test/CodeGen/PowerPC/ppc64-stackmap.ll
+++ b/llvm/test/CodeGen/PowerPC/ppc64-stackmap.ll
@@ -151,8 +151,8 @@ target triple = "powerpc64-unknown-linux-gnu"
define void @constantargs() {
entry:
- %0 = inttoptr i64 244837814094590 to i8*
- tail call void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 1, i32 40, i8* %0, i32 0, i64 65535, i64 65536, i64 4294967295, i64 4294967296, i128 66, i128 4294967297)
+ %0 = inttoptr i64 244837814094590 to ptr
+ tail call void (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.void(i64 1, i32 40, ptr %0, i32 0, i64 65535, i64 65536, i64 4294967295, i64 4294967296, i128 66, i128 4294967297)
ret void
}
@@ -176,7 +176,7 @@ entry:
define void @osrinline(i64 %a, i64 %b) {
entry:
; Runtime void->void call.
- call void inttoptr (i64 244837814094590 to void ()*)()
+ call void inttoptr (i64 244837814094590 to ptr)()
; Followed by inline OSR patchpoint with 12-byte shadow and 2 live vars.
call void (i64, i32, ...) @llvm.experimental.stackmap(i64 3, i32 12, i64 %a, i64 %b)
ret void
@@ -207,8 +207,8 @@ entry:
br i1 %test, label %ret, label %cold
cold:
; OSR patchpoint with 12-byte nop-slide and 2 live vars.
- %thunk = inttoptr i64 244837814094590 to i8*
- call void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 4, i32 40, i8* %thunk, i32 0, i64 %a, i64 %b)
+ %thunk = inttoptr i64 244837814094590 to ptr
+ call void (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.void(i64 4, i32 40, ptr %thunk, i32 0, i64 %a, i64 %b)
unreachable
ret:
ret void
@@ -221,10 +221,10 @@ ret:
;
; FIXME: There are currently no stackmap entries. After moving to
; AnyRegCC, we will have entries for the object and return value.
-define i64 @propertyRead(i64* %obj) {
+define i64 @propertyRead(ptr %obj) {
entry:
- %resolveRead = inttoptr i64 244837814094590 to i8*
- %result = call i64 (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.i64(i64 5, i32 40, i8* %resolveRead, i32 1, i64* %obj)
+ %resolveRead = inttoptr i64 244837814094590 to ptr
+ %result = call i64 (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.i64(i64 5, i32 40, ptr %resolveRead, i32 1, ptr %obj)
%add = add i64 %result, 3
ret i64 %add
}
@@ -245,10 +245,10 @@ entry:
; CHECK-NEXT: .short {{[0-9]+}}
; CHECK-NEXT: .short 0
; CHECK-NEXT: .long 0
-define void @propertyWrite(i64 %dummy1, i64* %obj, i64 %dummy2, i64 %a) {
+define void @propertyWrite(i64 %dummy1, ptr %obj, i64 %dummy2, i64 %a) {
entry:
- %resolveWrite = inttoptr i64 244837814094590 to i8*
- call anyregcc void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 6, i32 40, i8* %resolveWrite, i32 2, i64* %obj, i64 %a)
+ %resolveWrite = inttoptr i64 244837814094590 to ptr
+ call anyregcc void (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.void(i64 6, i32 40, ptr %resolveWrite, i32 2, ptr %obj, i64 %a)
ret void
}
@@ -271,10 +271,10 @@ entry:
; CHECK-NEXT: .short {{[0-9]+}}
; CHECK-NEXT: .short 0
; CHECK-NEXT: .long 0
-define void @jsVoidCall(i64 %dummy1, i64* %obj, i64 %arg, i64 %l1, i64 %l2) {
+define void @jsVoidCall(i64 %dummy1, ptr %obj, i64 %arg, i64 %l1, i64 %l2) {
entry:
- %resolveCall = inttoptr i64 244837814094590 to i8*
- call void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 7, i32 40, i8* %resolveCall, i32 2, i64* %obj, i64 %arg, i64 %l1, i64 %l2)
+ %resolveCall = inttoptr i64 244837814094590 to ptr
+ call void (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.void(i64 7, i32 40, ptr %resolveCall, i32 2, ptr %obj, i64 %arg, i64 %l1, i64 %l2)
ret void
}
@@ -297,10 +297,10 @@ entry:
; CHECK-NEXT: .short {{[0-9]+}}
; CHECK-NEXT: .short 0
; CHECK-NEXT: .long 0
-define i64 @jsIntCall(i64 %dummy1, i64* %obj, i64 %arg, i64 %l1, i64 %l2) {
+define i64 @jsIntCall(i64 %dummy1, ptr %obj, i64 %arg, i64 %l1, i64 %l2) {
entry:
- %resolveCall = inttoptr i64 244837814094590 to i8*
- %result = call i64 (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.i64(i64 8, i32 40, i8* %resolveCall, i32 2, i64* %obj, i64 %arg, i64 %l1, i64 %l2)
+ %resolveCall = inttoptr i64 244837814094590 to ptr
+ %result = call i64 (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.i64(i64 8, i32 40, ptr %resolveCall, i32 2, ptr %obj, i64 %arg, i64 %l1, i64 %l2)
%add = add i64 %result, 3
ret i64 %add
}
@@ -323,7 +323,7 @@ entry:
; CHECK-NEXT: .long
define void @spilledValue(i64 %arg0, i64 %arg1, i64 %arg2, i64 %arg3, i64 %arg4, i64 %l0, i64 %l1, i64 %l2, i64 %l3, i64 %l4, i64 %l5, i64 %l6, i64 %l7, i64 %l8, i64 %l9, i64 %l10, i64 %l11, i64 %l12, i64 %l13, i64 %l14, i64 %l15, i64 %l16, i64 %l17, i64 %l18, i64 %l19, i64 %l20, i64 %l21, i64 %l22, i64 %l23, i64 %l24, i64 %l25, i64 %l26, i64 %l27) {
entry:
- call void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 11, i32 40, i8* null, i32 5, i64 %arg0, i64 %arg1, i64 %arg2, i64 %arg3, i64 %arg4, i64 %l0, i64 %l1, i64 %l2, i64 %l3, i64 %l4, i64 %l5, i64 %l6, i64 %l7, i64 %l8, i64 %l9, i64 %l10, i64 %l11, i64 %l12, i64 %l13, i64 %l14, i64 %l15, i64 %l16, i64 %l17, i64 %l18, i64 %l19, i64 %l20, i64 %l21, i64 %l22, i64 %l23, i64 %l24, i64 %l25, i64 %l26, i64 %l27)
+ call void (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.void(i64 11, i32 40, ptr null, i32 5, i64 %arg0, i64 %arg1, i64 %arg2, i64 %arg3, i64 %arg4, i64 %l0, i64 %l1, i64 %l2, i64 %l3, i64 %l4, i64 %l5, i64 %l6, i64 %l7, i64 %l8, i64 %l9, i64 %l10, i64 %l11, i64 %l12, i64 %l13, i64 %l14, i64 %l15, i64 %l16, i64 %l17, i64 %l18, i64 %l19, i64 %l20, i64 %l21, i64 %l22, i64 %l23, i64 %l24, i64 %l25, i64 %l26, i64 %l27)
ret void
}
@@ -438,10 +438,10 @@ define void @floats(float %f, double %g) {
%ff = alloca float
%gg = alloca double
call void (i64, i32, ...) @llvm.experimental.stackmap(i64 888, i32 0, float 1.25,
- double 1.5, float %f, double %g, float* %ff, double* %gg)
+ double 1.5, float %f, double %g, ptr %ff, ptr %gg)
ret void
}
declare void @llvm.experimental.stackmap(i64, i32, ...)
-declare void @llvm.experimental.patchpoint.void(i64, i32, i8*, i32, ...)
-declare i64 @llvm.experimental.patchpoint.i64(i64, i32, i8*, i32, ...)
+declare void @llvm.experimental.patchpoint.void(i64, i32, ptr, i32, ...)
+declare i64 @llvm.experimental.patchpoint.i64(i64, i32, ptr, i32, ...)
diff --git a/llvm/test/CodeGen/SystemZ/anyregcc-novec.ll b/llvm/test/CodeGen/SystemZ/anyregcc-novec.ll
index b37cd220ffeba..e4f5366e41892 100644
--- a/llvm/test/CodeGen/SystemZ/anyregcc-novec.ll
+++ b/llvm/test/CodeGen/SystemZ/anyregcc-novec.ll
@@ -70,5 +70,5 @@ entry:
ret void
}
-declare void @llvm.experimental.patchpoint.void(i64, i32, i8*, i32, ...)
-declare i64 @llvm.experimental.patchpoint.i64(i64, i32, i8*, i32, ...)
+declare void @llvm.experimental.patchpoint.void(i64, i32, ptr, i32, ...)
+declare i64 @llvm.experimental.patchpoint.i64(i64, i32, ptr, i32, ...)
diff --git a/llvm/test/CodeGen/SystemZ/anyregcc-vec.ll b/llvm/test/CodeGen/SystemZ/anyregcc-vec.ll
index 02f1c4d483c1e..80540e46620c2 100644
--- a/llvm/test/CodeGen/SystemZ/anyregcc-vec.ll
+++ b/llvm/test/CodeGen/SystemZ/anyregcc-vec.ll
@@ -95,5 +95,5 @@ entry:
ret void
}
-declare void @llvm.experimental.patchpoint.void(i64, i32, i8*, i32, ...)
-declare i64 @llvm.experimental.patchpoint.i64(i64, i32, i8*, i32, ...)
+declare void @llvm.experimental.patchpoint.void(i64, i32, ptr, i32, ...)
+declare i64 @llvm.experimental.patchpoint.i64(i64, i32, ptr, i32, ...)
diff --git a/llvm/test/CodeGen/SystemZ/anyregcc.ll b/llvm/test/CodeGen/SystemZ/anyregcc.ll
index 2480f8c36b38c..76b9352f30049 100644
--- a/llvm/test/CodeGen/SystemZ/anyregcc.ll
+++ b/llvm/test/CodeGen/SystemZ/anyregcc.ll
@@ -71,7 +71,7 @@
; CHECK-NEXT: .long 3
define i64 @test() nounwind ssp uwtable {
entry:
- call anyregcc void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 0, i32 14, i8* null, i32 2, i32 1, i32 2, i64 3)
+ call anyregcc void (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.void(i64 0, i32 14, ptr null, i32 2, i32 1, i32 2, i64 3)
ret i64 0
}
@@ -94,10 +94,10 @@ entry:
; CHECK-NEXT: .short {{[0-9]+}}
; CHECK-NEXT: .short 0
; CHECK-NEXT: .long 0
-define i64 @property_access1(i8* %obj) nounwind ssp uwtable {
+define i64 @property_access1(ptr %obj) nounwind ssp uwtable {
entry:
- %f = inttoptr i64 12297829382473034410 to i8*
- %ret = call anyregcc i64 (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.i64(i64 1, i32 14, i8* %f, i32 1, i8* %obj)
+ %f = inttoptr i64 12297829382473034410 to ptr
+ %ret = call anyregcc i64 (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.i64(i64 1, i32 14, ptr %f, i32 1, ptr %obj)
ret i64 %ret
}
@@ -123,8 +123,8 @@ entry:
define i64 @property_access2() nounwind ssp uwtable {
entry:
%obj = alloca i64, align 8
- %f = inttoptr i64 12297829382473034410 to i8*
- %ret = call anyregcc i64 (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.i64(i64 2, i32 14, i8* %f, i32 1, i64* %obj)
+ %f = inttoptr i64 12297829382473034410 to ptr
+ %ret = call anyregcc i64 (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.i64(i64 2, i32 14, ptr %f, i32 1, ptr %obj)
ret i64 %ret
}
@@ -150,8 +150,8 @@ entry:
define i64 @property_access3() nounwind ssp uwtable {
entry:
%obj = alloca i64, align 8
- %f = inttoptr i64 12297829382473034410 to i8*
- %ret = call anyregcc i64 (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.i64(i64 3, i32 14, i8* %f, i32 0, i64* %obj)
+ %f = inttoptr i64 12297829382473034410 to ptr
+ %ret = call anyregcc i64 (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.i64(i64 3, i32 14, ptr %f, i32 0, ptr %obj)
ret i64 %ret
}
@@ -251,10 +251,10 @@ entry:
; CHECK-NEXT: .short {{[0-9]+}}
; CHECK-NEXT: .short 0
; CHECK-NEXT: .long 0
-define i64 @anyreg_test1(i8* %a1, i8* %a2, i8* %a3, i8* %a4, i8* %a5, i8* %a6, i8* %a7, i8* %a8, i8* %a9, i8* %a10, i8* %a11, i8* %a12) nounwind ssp uwtable {
+define i64 @anyreg_test1(ptr %a1, ptr %a2, ptr %a3, ptr %a4, ptr %a5, ptr %a6, ptr %a7, ptr %a8, ptr %a9, ptr %a10, ptr %a11, ptr %a12) nounwind ssp uwtable {
entry:
- %f = inttoptr i64 12297829382473034410 to i8*
- %ret = call anyregcc i64 (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.i64(i64 4, i32 14, i8* %f, i32 12, i8* %a1, i8* %a2, i8* %a3, i8* %a4, i8* %a5, i8* %a6, i8* %a7, i8* %a8, i8* %a9, i8* %a10, i8* %a11, i8* %a12)
+ %f = inttoptr i64 12297829382473034410 to ptr
+ %ret = call anyregcc i64 (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.i64(i64 4, i32 14, ptr %f, i32 12, ptr %a1, ptr %a2, ptr %a3, ptr %a4, ptr %a5, ptr %a6, ptr %a7, ptr %a8, ptr %a9, ptr %a10, ptr %a11, ptr %a12)
ret i64 %ret
}
@@ -354,10 +354,10 @@ entry:
; CHECK-NEXT: .short {{[0-9]+}}
; CHECK-NEXT: .short 0
; CHECK-NEXT: .long 0
-define i64 @anyreg_test2(i8* %a1, i8* %a2, i8* %a3, i8* %a4, i8* %a5, i8* %a6, i8* %a7, i8* %a8, i8* %a9, i8* %a10, i8* %a11, i8* %a12) nounwind ssp uwtable {
+define i64 @anyreg_test2(ptr %a1, ptr %a2, ptr %a3, ptr %a4, ptr %a5, ptr %a6, ptr %a7, ptr %a8, ptr %a9, ptr %a10, ptr %a11, ptr %a12) nounwind ssp uwtable {
entry:
- %f = inttoptr i64 12297829382473034410 to i8*
- %ret = call anyregcc i64 (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.i64(i64 5, i32 14, i8* %f, i32 8, i8* %a1, i8* %a2, i8* %a3, i8* %a4, i8* %a5, i8* %a6, i8* %a7, i8* %a8, i8* %a9, i8* %a10, i8* %a11, i8* %a12)
+ %f = inttoptr i64 12297829382473034410 to ptr
+ %ret = call anyregcc i64 (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.i64(i64 5, i32 14, ptr %f, i32 8, ptr %a1, ptr %a2, ptr %a3, ptr %a4, ptr %a5, ptr %a6, ptr %a7, ptr %a8, ptr %a9, ptr %a10, ptr %a11, ptr %a12)
ret i64 %ret
}
@@ -391,7 +391,7 @@ entry:
; CHECK-NEXT: .long 0
define i64 @patchpoint_spilldef(i64 %p1, i64 %p2, i64 %p3, i64 %p4) {
entry:
- %result = tail call anyregcc i64 (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.i64(i64 12, i32 14, i8* inttoptr (i64 0 to i8*), i32 2, i64 %p1, i64 %p2)
+ %result = tail call anyregcc i64 (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.i64(i64 12, i32 14, ptr inttoptr (i64 0 to ptr), i32 2, i64 %p1, i64 %p2)
tail call void asm sideeffect "nopr %r0", "~{r0},~{r1},~{r2},~{r3},~{r4},~{r5},~{r6},~{r7},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14}"() nounwind
ret i64 %result
}
@@ -441,9 +441,9 @@ entry:
define i64 @patchpoint_spillargs(i64 %p1, i64 %p2, i64 %p3, i64 %p4) {
entry:
tail call void asm sideeffect "nopr %r0", "~{r0},~{r1},~{r2},~{r3},~{r4},~{r5},~{r6},~{r7},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14}"() nounwind
- %result = tail call anyregcc i64 (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.i64(i64 13, i32 14, i8* inttoptr (i64 0 to i8*), i32 2, i64 %p1, i64 %p2, i64 %p3, i64 %p4)
+ %result = tail call anyregcc i64 (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.i64(i64 13, i32 14, ptr inttoptr (i64 0 to ptr), i32 2, i64 %p1, i64 %p2, i64 %p3, i64 %p4)
ret i64 %result
}
-declare void @llvm.experimental.patchpoint.void(i64, i32, i8*, i32, ...)
-declare i64 @llvm.experimental.patchpoint.i64(i64, i32, i8*, i32, ...)
+declare void @llvm.experimental.patchpoint.void(i64, i32, ptr, i32, ...)
+declare i64 @llvm.experimental.patchpoint.i64(i64, i32, ptr, i32, ...)
diff --git a/llvm/test/CodeGen/SystemZ/patchpoint-invoke.ll b/llvm/test/CodeGen/SystemZ/patchpoint-invoke.ll
index a9bb7acd589fe..7f2df536e86c3 100644
--- a/llvm/test/CodeGen/SystemZ/patchpoint-invoke.ll
+++ b/llvm/test/CodeGen/SystemZ/patchpoint-invoke.ll
@@ -2,7 +2,7 @@
; Test invoking of patchpoints
;
-define i64 @patchpoint_invoke(i64 %p1, i64 %p2) personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+define i64 @patchpoint_invoke(i64 %p1, i64 %p2) personality ptr @__gxx_personality_v0 {
entry:
; CHECK-LABEL: patchpoint_invoke:
; CHECK-NEXT: [[FUNC_BEGIN:.L.*]]:
@@ -17,16 +17,16 @@ entry:
; CHECK-NEXT: bcr 0, %r0
; CHECK-NEXT: [[PP_END:.L.*]]:
; CHECK: br %r14
- %resolveCall = inttoptr i64 559038736 to i8*
- %result = invoke i64 (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.i64(i64 2, i32 10, i8* %resolveCall, i32 1, i64 %p1, i64 %p2)
+ %resolveCall = inttoptr i64 559038736 to ptr
+ %result = invoke i64 (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.i64(i64 2, i32 10, ptr %resolveCall, i32 1, i64 %p1, i64 %p2)
to label %success unwind label %threw
success:
ret i64 %result
threw:
- %0 = landingpad { i8*, i32 }
- catch i8* null
+ %0 = landingpad { ptr, i32 }
+ catch ptr null
ret i64 0
}
@@ -60,6 +60,6 @@ threw:
declare void @llvm.experimental.stackmap(i64, i32, ...)
-declare void @llvm.experimental.patchpoint.void(i64, i32, i8*, i32, ...)
-declare i64 @llvm.experimental.patchpoint.i64(i64, i32, i8*, i32, ...)
+declare void @llvm.experimental.patchpoint.void(i64, i32, ptr, i32, ...)
+declare i64 @llvm.experimental.patchpoint.i64(i64, i32, ptr, i32, ...)
declare i32 @__gxx_personality_v0(...)
diff --git a/llvm/test/CodeGen/SystemZ/patchpoint.ll b/llvm/test/CodeGen/SystemZ/patchpoint.ll
index ef98190a2af06..9b5d480aaada2 100644
--- a/llvm/test/CodeGen/SystemZ/patchpoint.ll
+++ b/llvm/test/CodeGen/SystemZ/patchpoint.ll
@@ -14,10 +14,10 @@ entry:
; CHECK-NEXT: bcr 0, %r0
; CHECK: lgr %r2, [[REG0:%r[0-9]+]]
; CHECK: br %r14
- %resolveCall2 = inttoptr i64 559038736 to i8*
- %result = tail call i64 (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.i64(i64 2, i32 10, i8* %resolveCall2, i32 4, i64 %p1, i64 %p2, i64 %p3, i64 %p4)
- %resolveCall3 = inttoptr i64 559038737 to i8*
- tail call void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 3, i32 10, i8* %resolveCall3, i32 2, i64 %p1, i64 %result)
+ %resolveCall2 = inttoptr i64 559038736 to ptr
+ %result = tail call i64 (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.i64(i64 2, i32 10, ptr %resolveCall2, i32 4, i64 %p1, i64 %p2, i64 %p3, i64 %p4)
+ %resolveCall3 = inttoptr i64 559038737 to ptr
+ tail call void (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.void(i64 3, i32 10, ptr %resolveCall3, i32 2, i64 %p1, i64 %result)
ret i64 %result
}
@@ -31,7 +31,7 @@ entry:
; CHECK: brasl %r14, foo@PLT
; CHECK-NEXT: bcr 0, %r0
; CHECK: br %r14
- %result = tail call i64 (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.i64(i64 9, i32 8, i8* bitcast (i64 (i64, i64)* @foo to i8*), i32 2, i64 %p1, i64 %p2)
+ %result = tail call i64 (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.i64(i64 9, i32 8, ptr @foo, i32 2, i64 %p1, i64 %p2)
ret i64 %result
}
@@ -47,10 +47,10 @@ entry:
define void @caller_meta_leaf() {
entry:
%metadata = alloca i64, i32 3, align 8
- store i64 11, i64* %metadata
- store i64 12, i64* %metadata
- store i64 13, i64* %metadata
- call void (i64, i32, ...) @llvm.experimental.stackmap(i64 4, i32 0, i64* %metadata)
+ store i64 11, ptr %metadata
+ store i64 12, ptr %metadata
+ store i64 13, ptr %metadata
+ call void (i64, i32, ...) @llvm.experimental.stackmap(i64 4, i32 0, ptr %metadata)
ret void
}
@@ -58,19 +58,19 @@ entry:
; <rdar:15390785> Assertion failed: (CI.getNumArgOperands() >= NumArgs + 4)
; There is no way to verify this, since it depends on memory allocation.
; But I think it's useful to include as a working example.
-define i64 @testLowerConstant(i64 %arg, i64 %tmp2, i64 %tmp10, i64* %tmp33, i64 %tmp79) {
+define i64 @testLowerConstant(i64 %arg, i64 %tmp2, i64 %tmp10, ptr %tmp33, i64 %tmp79) {
entry:
%tmp80 = add i64 %tmp79, -16
- %tmp81 = inttoptr i64 %tmp80 to i64*
- %tmp82 = load i64, i64* %tmp81, align 8
+ %tmp81 = inttoptr i64 %tmp80 to ptr
+ %tmp82 = load i64, ptr %tmp81, align 8
tail call void (i64, i32, ...) @llvm.experimental.stackmap(i64 14, i32 6, i64 %arg, i64 %tmp2, i64 %tmp10, i64 %tmp82)
- tail call void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 15, i32 30, i8* null, i32 3, i64 %arg, i64 %tmp10, i64 %tmp82)
- %tmp83 = load i64, i64* %tmp33, align 8
+ tail call void (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.void(i64 15, i32 30, ptr null, i32 3, i64 %arg, i64 %tmp10, i64 %tmp82)
+ %tmp83 = load i64, ptr %tmp33, align 8
%tmp84 = add i64 %tmp83, -24
- %tmp85 = inttoptr i64 %tmp84 to i64*
- %tmp86 = load i64, i64* %tmp85, align 8
+ %tmp85 = inttoptr i64 %tmp84 to ptr
+ %tmp86 = load i64, ptr %tmp85, align 8
tail call void (i64, i32, ...) @llvm.experimental.stackmap(i64 17, i32 6, i64 %arg, i64 %tmp10, i64 %tmp86)
- tail call void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 18, i32 30, i8* null, i32 3, i64 %arg, i64 %tmp10, i64 %tmp86)
+ tail call void (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.void(i64 18, i32 30, ptr null, i32 3, i64 %arg, i64 %tmp10, i64 %tmp86)
ret i64 10
}
@@ -81,7 +81,7 @@ entry:
; CHECK: .Ltmp
; CHECK: bcr 0, %r0
; CHECK: br %r14
- %result = tail call i64 (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.i64(i64 5, i32 2, i8* null, i32 2, i64 %p1, i64 %p2)
+ %result = tail call i64 (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.i64(i64 5, i32 2, ptr null, i32 2, i64 %p1, i64 %p2)
ret void
}
@@ -92,8 +92,8 @@ entry:
; CHECK: llilf %r1, 2566957755
; CHECK-NEXT: iihf %r1, 1432778632
; CHECK-NEXT: basr %r14, %r1
- %resolveCall2 = inttoptr i64 6153737369414576827 to i8*
- %result = tail call i64 (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.i64(i64 2, i32 14, i8* %resolveCall2, i32 0)
+ %resolveCall2 = inttoptr i64 6153737369414576827 to ptr
+ %result = tail call i64 (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.i64(i64 2, i32 14, ptr %resolveCall2, i32 0)
ret i64 %result
}
@@ -117,7 +117,7 @@ block0:
br label %exit
patch1:
- call void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 0, i32 65536, i8* null, i32 0)
+ call void (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.void(i64 0, i32 65536, ptr null, i32 0)
br label %exit
exit:
@@ -151,5 +151,5 @@ exit:
declare void @llvm.experimental.stackmap(i64, i32, ...)
-declare void @llvm.experimental.patchpoint.void(i64, i32, i8*, i32, ...)
-declare i64 @llvm.experimental.patchpoint.i64(i64, i32, i8*, i32, ...)
+declare void @llvm.experimental.patchpoint.void(i64, i32, ptr, i32, ...)
+declare i64 @llvm.experimental.patchpoint.i64(i64, i32, ptr, i32, ...)
diff --git a/llvm/test/CodeGen/SystemZ/stackmap.ll b/llvm/test/CodeGen/SystemZ/stackmap.ll
index d415a68bfc96a..88c7336037c9c 100644
--- a/llvm/test/CodeGen/SystemZ/stackmap.ll
+++ b/llvm/test/CodeGen/SystemZ/stackmap.ll
@@ -179,8 +179,8 @@
define void @constantargs() {
entry:
- %0 = inttoptr i64 12345 to i8*
- tail call void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 1, i32 14, i8* %0, i32 0, i16 65535, i16 -1, i32 65536, i32 2000000000, i32 2147483647, i32 -1, i32 4294967295, i32 4294967296, i64 2147483648, i64 4294967295, i64 4294967296, i64 -1, i128 66, i128 4294967297)
+ %0 = inttoptr i64 12345 to ptr
+ tail call void (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.void(i64 1, i32 14, ptr %0, i32 0, i16 65535, i16 -1, i32 65536, i32 2000000000, i32 2147483647, i32 -1, i32 4294967295, i32 4294967296, i64 2147483648, i64 4294967295, i64 4294967296, i64 -1, i128 66, i128 4294967297)
ret void
}
@@ -204,7 +204,7 @@ entry:
define void @osrinline(i64 %a, i64 %b) {
entry:
; Runtime void->void call.
- call void inttoptr (i64 -559038737 to void ()*)()
+ call void inttoptr (i64 -559038737 to ptr)()
; Followed by inline OSR patchpoint with 12-byte shadow and 2 live vars.
call void (i64, i32, ...) @llvm.experimental.stackmap(i64 3, i32 12, i64 %a, i64 %b)
ret void
@@ -235,8 +235,8 @@ entry:
br i1 %test, label %ret, label %cold
cold:
; OSR patchpoint with 12-byte nop-slide and 2 live vars.
- %thunk = inttoptr i64 -559038737 to i8*
- call void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 4, i32 14, i8* %thunk, i32 0, i64 %a, i64 %b)
+ %thunk = inttoptr i64 -559038737 to ptr
+ call void (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.void(i64 4, i32 14, ptr %thunk, i32 0, i64 %a, i64 %b)
unreachable
ret:
ret void
@@ -258,10 +258,10 @@ ret:
; CHECK-NEXT: .short {{[0-9]+}}
; CHECK-NEXT: .short 0
; CHECK-NEXT: .long 0
-define i64 @propertyRead(i64* %obj) {
+define i64 @propertyRead(ptr %obj) {
entry:
- %resolveRead = inttoptr i64 -559038737 to i8*
- %result = call anyregcc i64 (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.i64(i64 5, i32 14, i8* %resolveRead, i32 1, i64* %obj)
+ %resolveRead = inttoptr i64 -559038737 to ptr
+ %result = call anyregcc i64 (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.i64(i64 5, i32 14, ptr %resolveRead, i32 1, ptr %obj)
%add = add i64 %result, 3
ret i64 %add
}
@@ -282,10 +282,10 @@ entry:
; CHECK-NEXT: .short {{[0-9]+}}
; CHECK-NEXT: .short 0
; CHECK-NEXT: .long 0
-define void @propertyWrite(i64 %dummy1, i64* %obj, i64 %dummy2, i64 %a) {
+define void @propertyWrite(i64 %dummy1, ptr %obj, i64 %dummy2, i64 %a) {
entry:
- %resolveWrite = inttoptr i64 -559038737 to i8*
- call anyregcc void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 6, i32 14, i8* %resolveWrite, i32 2, i64* %obj, i64 %a)
+ %resolveWrite = inttoptr i64 -559038737 to ptr
+ call anyregcc void (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.void(i64 6, i32 14, ptr %resolveWrite, i32 2, ptr %obj, i64 %a)
ret void
}
@@ -308,10 +308,10 @@ entry:
; CHECK-NEXT: .short {{[0-9]+}}
; CHECK-NEXT: .short 0
; CHECK-NEXT: .long 0
-define void @jsVoidCall(i64 %dummy1, i64* %obj, i64 %arg, i64 %l1, i64 %l2) {
+define void @jsVoidCall(i64 %dummy1, ptr %obj, i64 %arg, i64 %l1, i64 %l2) {
entry:
- %resolveCall = inttoptr i64 -559038737 to i8*
- call void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 7, i32 14, i8* %resolveCall, i32 2, i64* %obj, i64 %arg, i64 %l1, i64 %l2)
+ %resolveCall = inttoptr i64 -559038737 to ptr
+ call void (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.void(i64 7, i32 14, ptr %resolveCall, i32 2, ptr %obj, i64 %arg, i64 %l1, i64 %l2)
ret void
}
@@ -334,10 +334,10 @@ entry:
; CHECK-NEXT: .short {{[0-9]+}}
; CHECK-NEXT: .short 0
; CHECK-NEXT: .long 0
-define i64 @jsIntCall(i64 %dummy1, i64* %obj, i64 %arg, i64 %l1, i64 %l2) {
+define i64 @jsIntCall(i64 %dummy1, ptr %obj, i64 %arg, i64 %l1, i64 %l2) {
entry:
- %resolveCall = inttoptr i64 -559038737 to i8*
- %result = call i64 (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.i64(i64 8, i32 14, i8* %resolveCall, i32 2, i64* %obj, i64 %arg, i64 %l1, i64 %l2)
+ %resolveCall = inttoptr i64 -559038737 to ptr
+ %result = call i64 (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.i64(i64 8, i32 14, ptr %resolveCall, i32 2, ptr %obj, i64 %arg, i64 %l1, i64 %l2)
%add = add i64 %result, 3
ret i64 %add
}
@@ -360,7 +360,7 @@ entry:
; CHECK-NEXT: .long
define void @spilledValue(i64 %arg0, i64 %arg1, i64 %arg2, i64 %arg3, i64 %arg4, i64 %l0, i64 %l1, i64 %l2, i64 %l3, i64 %l4, i64 %l5, i64 %l6, i64 %l7, i64 %l8, i64 %l9, i64 %l10, i64 %l11, i64 %l12, i64 %l13, i64 %l14, i64 %l15, i64 %l16) {
entry:
- call void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 11, i32 14, i8* null, i32 5, i64 %arg0, i64 %arg1, i64 %arg2, i64 %arg3, i64 %arg4, i64 %l0, i64 %l1, i64 %l2, i64 %l3, i64 %l4, i64 %l5, i64 %l6, i64 %l7, i64 %l8, i64 %l9, i64 %l10, i64 %l11, i64 %l12, i64 %l13, i64 %l14, i64 %l15, i64 %l16)
+ call void (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.void(i64 11, i32 14, ptr null, i32 5, i64 %arg0, i64 %arg1, i64 %arg2, i64 %arg3, i64 %arg4, i64 %l0, i64 %l1, i64 %l2, i64 %l3, i64 %l4, i64 %l5, i64 %l6, i64 %l7, i64 %l8, i64 %l9, i64 %l10, i64 %l11, i64 %l12, i64 %l13, i64 %l14, i64 %l15, i64 %l16)
ret void
}
@@ -409,7 +409,7 @@ bb1:
unreachable
bb2:
- %tmp = load i64, i64* inttoptr (i64 140685446136880 to i64*)
+ %tmp = load i64, ptr inttoptr (i64 140685446136880 to ptr)
br i1 undef, label %bb16, label %bb17
bb16:
@@ -484,13 +484,13 @@ define void @liveConstant() {
define void @directFrameIdx() {
entry:
%metadata1 = alloca i64, i32 3, align 8
- store i64 11, i64* %metadata1
- store i64 12, i64* %metadata1
- store i64 13, i64* %metadata1
- call void (i64, i32, ...) @llvm.experimental.stackmap(i64 16, i32 0, i64* %metadata1)
+ store i64 11, ptr %metadata1
+ store i64 12, ptr %metadata1
+ store i64 13, ptr %metadata1
+ call void (i64, i32, ...) @llvm.experimental.stackmap(i64 16, i32 0, ptr %metadata1)
%metadata2 = alloca i8, i32 4, align 8
%metadata3 = alloca i16, i32 4, align 8
- call void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 17, i32 6, i8* null, i32 0, i8* %metadata2, i16* %metadata3)
+ call void (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.void(i64 17, i32 6, ptr null, i32 0, ptr %metadata2, ptr %metadata3)
ret void
}
@@ -506,10 +506,10 @@ entry:
; CHECK: .long .L{{.*}}-longid
define void @longid() {
entry:
- tail call void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 4294967295, i32 0, i8* null, i32 0)
- tail call void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 4294967296, i32 0, i8* null, i32 0)
- tail call void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 9223372036854775807, i32 0, i8* null, i32 0)
- tail call void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 -1, i32 0, i8* null, i32 0)
+ tail call void (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.void(i64 4294967295, i32 0, ptr null, i32 0)
+ tail call void (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.void(i64 4294967296, i32 0, ptr null, i32 0)
+ tail call void (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.void(i64 9223372036854775807, i32 0, ptr null, i32 0)
+ tail call void (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.void(i64 -1, i32 0, ptr null, i32 0)
ret void
}
@@ -541,7 +541,7 @@ define void @clobberScratch(i32 %a) {
; CHECK-NEXT: .short 0
define void @needsStackRealignment() {
%val = alloca i64, i32 3, align 128
- tail call void (...) @escape_values(i64* %val)
+ tail call void (...) @escape_values(ptr %val)
; Note: Adding any non-constant to the stackmap would fail because we
; expected to be able to address off the frame pointer. In a realigned
; frame, we must use the stack pointer instead. This is a separate bug.
@@ -600,10 +600,10 @@ define void @floats(float %f, double %g) {
%ff = alloca float
%gg = alloca double
call void (i64, i32, ...) @llvm.experimental.stackmap(i64 888, i32 0, float 1.25,
- double 1.5, float %f, double %g, float* %ff, double* %gg)
+ double 1.5, float %f, double %g, ptr %ff, ptr %gg)
ret void
}
declare void @llvm.experimental.stackmap(i64, i32, ...)
-declare void @llvm.experimental.patchpoint.void(i64, i32, i8*, i32, ...)
-declare i64 @llvm.experimental.patchpoint.i64(i64, i32, i8*, i32, ...)
+declare void @llvm.experimental.patchpoint.void(i64, i32, ptr, i32, ...)
+declare i64 @llvm.experimental.patchpoint.i64(i64, i32, ptr, i32, ...)
diff --git a/llvm/test/CodeGen/X86/patchpoint-verifiable.mir b/llvm/test/CodeGen/X86/patchpoint-verifiable.mir
index cd57c64b4b93a..9bd6ab0303f23 100644
--- a/llvm/test/CodeGen/X86/patchpoint-verifiable.mir
+++ b/llvm/test/CodeGen/X86/patchpoint-verifiable.mir
@@ -6,11 +6,11 @@
define void @small_patchpoint_codegen(i64 %p1, i64 %p2, i64 %p3, i64 %p4) {
entry:
- %result = tail call i64 (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.i64(i64 5, i32 5, i8* null, i32 2, i64 %p1, i64 %p2)
+ %result = tail call i64 (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.i64(i64 5, i32 5, ptr null, i32 2, i64 %p1, i64 %p2)
ret void
}
- declare i64 @llvm.experimental.patchpoint.i64(i64, i32, i8*, i32, ...)
+ declare i64 @llvm.experimental.patchpoint.i64(i64, i32, ptr, i32, ...)
...
---
diff --git a/llvm/test/CodeGen/X86/selectiondag-patchpoint-legalize.ll b/llvm/test/CodeGen/X86/selectiondag-patchpoint-legalize.ll
index 5d5d9fde8c823..0bc13ad8c9723 100644
--- a/llvm/test/CodeGen/X86/selectiondag-patchpoint-legalize.ll
+++ b/llvm/test/CodeGen/X86/selectiondag-patchpoint-legalize.ll
@@ -109,25 +109,25 @@
; CHECK-NEXT: .short 0
; CHECK-NEXT: .long 0
-@p32 = external global i8 addrspace(270)*
+@p32 = external global ptr addrspace(270)
%struct1 = type {i32, i64}
%struct2 = type {i1, i1, i1}
-declare void @llvm.experimental.patchpoint.void(i64, i32, i8*, i32, ...)
+declare void @llvm.experimental.patchpoint.void(i64, i32, ptr, i32, ...)
-define dso_local i32 @main(i32 %argc, i8** %argv) {
+define dso_local i32 @main(i32 %argc, ptr %argv) {
entry:
%i1reg = icmp eq i32 %argc, 5
%i7reg = zext i1 %i1reg to i7
%halfreg = sitofp i32 %argc to half
- %ptr32 = load i8 addrspace(270)*, i8 addrspace(270)** @p32
+ %ptr32 = load ptr addrspace(270), ptr @p32
%structreg1 = insertvalue %struct1 zeroinitializer, i32 %argc, 0
%structreg2 = insertvalue %struct2 zeroinitializer, i1 %i1reg, 0
- call void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(
+ call void (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.void(
i64 0,
i32 0,
- i8* null,
+ ptr null,
i32 0,
i1 %i1reg,
i7 22,
@@ -142,7 +142,7 @@ entry:
; FIXME: test non-constant i128 once these are fixed:
; - https://github.com/llvm/llvm-project/issues/26431
; - https://github.com/llvm/llvm-project/issues/55957
- i8 addrspace(270)* %ptr32,
+ ptr addrspace(270) %ptr32,
; FIXME: The stackmap record generated for structs is incorrect:
; - https://github.com/llvm/llvm-project/issues/55649
; - https://github.com/llvm/llvm-project/issues/55957