[llvm] 0c5b0b5 - [X86] Convert some tests to opaque pointers (NFC)
Nikita Popov via llvm-commits
llvm-commits@lists.llvm.org
Mon Dec 19 04:11:59 PST 2022
Author: Nikita Popov
Date: 2022-12-19T13:11:31+01:00
New Revision: 0c5b0b50c22d215177f7cdacf533444665ffd864
URL: https://github.com/llvm/llvm-project/commit/0c5b0b50c22d215177f7cdacf533444665ffd864
DIFF: https://github.com/llvm/llvm-project/commit/0c5b0b50c22d215177f7cdacf533444665ffd864.diff
LOG: [X86] Convert some tests to opaque pointers (NFC)
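The conversion is mechanical: every typed pointer type (i32*, void ()*, <4 x i32>*, and so on) becomes the single opaque ptr type, and pointer-to-pointer casts that turn into no-ops are dropped. A minimal sketch of the pattern (illustrative IR only; @g, @a and @f are made-up names, not taken from the tests below):

@g = global i32 0

; Before, with typed pointers:
;   @a = alias i64, bitcast (i32* @g to i64*)
;   define i32 @f(i32* %p) {
;     %v = load i32, i32* %p
;     ret i32 %v
;   }

; After, with opaque pointers (the bitcast becomes a no-op and disappears;
; loads still spell out the loaded value type explicitly):
@a = alias i64, ptr @g
define i32 @f(ptr %p) {
  %v = load i32, ptr %p
  ret i32 %v
}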
Added:
Modified:
llvm/test/CodeGen/X86/aliases.ll
llvm/test/CodeGen/X86/avxneconvert-intrinsics.ll
llvm/test/CodeGen/X86/avxvnniint8-intrinsics.ll
llvm/test/CodeGen/X86/cfguard-checks-funclet.ll
llvm/test/CodeGen/X86/cmpccxadd-intrinsics.ll
llvm/test/CodeGen/X86/compress-undef-float-passthrough.ll
llvm/test/CodeGen/X86/dllexport-x86_64.ll
llvm/test/CodeGen/X86/expand-large-div-rem-sdiv129.ll
llvm/test/CodeGen/X86/expand-large-div-rem-srem129.ll
llvm/test/CodeGen/X86/expand-large-div-rem-udiv129.ll
llvm/test/CodeGen/X86/expand-large-div-rem-urem129.ll
llvm/test/CodeGen/X86/fshl-splat-undef.ll
llvm/test/CodeGen/X86/func-sanitizer.ll
llvm/test/CodeGen/X86/function-alias.ll
llvm/test/CodeGen/X86/masked_compressstore_isel.ll
llvm/test/CodeGen/X86/no-plt-libcalls.ll
llvm/test/CodeGen/X86/pcsections.ll
llvm/test/CodeGen/X86/pr35763.ll
llvm/test/CodeGen/X86/pr44749.ll
llvm/test/CodeGen/X86/pr56351.ll
llvm/test/CodeGen/X86/pr57283.ll
llvm/test/CodeGen/X86/pr57474.ll
llvm/test/CodeGen/X86/pr57673.ll
llvm/test/CodeGen/X86/raoint-intrinsics-32.ll
llvm/test/CodeGen/X86/raoint-intrinsics-64.ll
llvm/test/CodeGen/X86/reassociate-add.ll
llvm/test/CodeGen/X86/selectiondag-stackmap-legalize.ll
llvm/test/CodeGen/X86/stack-protector-2.ll
llvm/test/CodeGen/X86/stack-protector-musttail.ll
llvm/test/CodeGen/X86/stackmap-dynamic-alloca.ll
Removed:
################################################################################
diff --git a/llvm/test/CodeGen/X86/aliases.ll b/llvm/test/CodeGen/X86/aliases.ll
index d69b820a91695..03ea2579d0f8a 100644
--- a/llvm/test/CodeGen/X86/aliases.ll
+++ b/llvm/test/CodeGen/X86/aliases.ll
@@ -2,27 +2,27 @@
; RUN: -relocation-model=pic | FileCheck %s
@thread_var = thread_local global i32 42, align 4
-@thread_alias = thread_local(localdynamic) alias i32, i32* @thread_var
+@thread_alias = thread_local(localdynamic) alias i32, ptr @thread_var
; CHECK-LABEL: get_thread_var
-define i32* @get_thread_var() {
+define ptr @get_thread_var() {
; CHECK: leal thread_var@TLSGD
- ret i32* @thread_var
+ ret ptr @thread_var
}
; CHECK-LABEL: get_thread_alias
-define i32* @get_thread_alias() {
+define ptr @get_thread_alias() {
; CHECK: leal thread_alias@TLSLD
- ret i32* @thread_alias
+ ret ptr @thread_alias
}
@bar = global i32 42
; CHECK-DAG: .globl foo1
-@foo1 = alias i32, i32* @bar
+@foo1 = alias i32, ptr @bar
; CHECK-DAG: .globl foo2
-@foo2 = alias i32, i32* @bar
+@foo2 = alias i32, ptr @bar
%FunTy = type i32()
@@ -30,43 +30,43 @@ define i32 @foo_f() {
ret i32 0
}
; CHECK-DAG: .weak bar_f
-@bar_f = weak alias %FunTy, %FunTy* @foo_f
+@bar_f = weak alias %FunTy, ptr @foo_f
-@bar_l = linkonce_odr alias i32, i32* @bar
+@bar_l = linkonce_odr alias i32, ptr @bar
; CHECK-DAG: .weak bar_l
-@bar_i = internal alias i32, i32* @bar
+@bar_i = internal alias i32, ptr @bar
; CHECK-DAG: .globl A
-@A = alias i64, bitcast (i32* @bar to i64*)
+@A = alias i64, ptr @bar
; CHECK-DAG: .globl bar_h
; CHECK-DAG: .hidden bar_h
-@bar_h = hidden alias i32, i32* @bar
+@bar_h = hidden alias i32, ptr @bar
; CHECK-DAG: .globl bar_p
; CHECK-DAG: .protected bar_p
-@bar_p = protected alias i32, i32* @bar
+@bar_p = protected alias i32, ptr @bar
; CHECK-DAG: .set test2, bar+4
-@test2 = alias i32, getelementptr(i32, i32* @bar, i32 1)
+@test2 = alias i32, getelementptr(i32, ptr @bar, i32 1)
; CHECK-DAG: .set test3, 42
-@test3 = alias i32, inttoptr(i32 42 to i32*)
+@test3 = alias i32, inttoptr(i32 42 to ptr)
; CHECK-DAG: .set test4, bar
-@test4 = alias i32, inttoptr(i64 ptrtoint (i32* @bar to i64) to i32*)
+@test4 = alias i32, inttoptr(i64 ptrtoint (ptr @bar to i64) to ptr)
; CHECK-DAG: .set test5, test2-bar
-@test5 = alias i32, inttoptr(i32 sub (i32 ptrtoint (i32* @test2 to i32),
- i32 ptrtoint (i32* @bar to i32)) to i32*)
+@test5 = alias i32, inttoptr(i32 sub (i32 ptrtoint (ptr @test2 to i32),
+ i32 ptrtoint (ptr @bar to i32)) to ptr)
; CHECK-DAG: .globl test
define i32 @test() {
entry:
- %tmp = load i32, i32* @foo1
- %tmp1 = load i32, i32* @foo2
- %tmp0 = load i32, i32* @bar_i
+ %tmp = load i32, ptr @foo1
+ %tmp1 = load i32, ptr @foo2
+ %tmp0 = load i32, ptr @bar_i
%tmp2 = call i32 @foo_f()
%tmp3 = add i32 %tmp, %tmp2
%tmp4 = call i32 @bar_f()
diff --git a/llvm/test/CodeGen/X86/avxneconvert-intrinsics.ll b/llvm/test/CodeGen/X86/avxneconvert-intrinsics.ll
index e7bc936546254..293a67e59e0c9 100644
--- a/llvm/test/CodeGen/X86/avxneconvert-intrinsics.ll
+++ b/llvm/test/CodeGen/X86/avxneconvert-intrinsics.ll
@@ -2,7 +2,7 @@
; RUN: llc < %s -O0 -verify-machineinstrs -mtriple=x86_64-unknown-unknown --show-mc-encoding -mattr=+avxneconvert | FileCheck %s --check-prefixes=CHECK,X64
; RUN: llc < %s -O0 -verify-machineinstrs -mtriple=i686-unknown-unknown --show-mc-encoding -mattr=+avxneconvert | FileCheck %s --check-prefixes=CHECK,X86
-define <4 x float> @test_int_x86_vbcstnebf162ps128(i8* %A) {
+define <4 x float> @test_int_x86_vbcstnebf162ps128(ptr %A) {
; X64-LABEL: test_int_x86_vbcstnebf162ps128:
; X64: # %bb.0:
; X64-NEXT: vbcstnebf162ps (%rdi), %xmm0 # encoding: [0xc4,0xe2,0x7a,0xb1,0x07]
@@ -13,12 +13,12 @@ define <4 x float> @test_int_x86_vbcstnebf162ps128(i8* %A) {
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-NEXT: vbcstnebf162ps (%eax), %xmm0 # encoding: [0xc4,0xe2,0x7a,0xb1,0x00]
; X86-NEXT: retl # encoding: [0xc3]
- %ret = call <4 x float> @llvm.x86.vbcstnebf162ps128(i8* %A)
+ %ret = call <4 x float> @llvm.x86.vbcstnebf162ps128(ptr %A)
ret <4 x float> %ret
}
-declare <4 x float> @llvm.x86.vbcstnebf162ps128(i8* %A)
+declare <4 x float> @llvm.x86.vbcstnebf162ps128(ptr %A)
-define <8 x float> @test_int_x86_vbcstnebf162ps256(i8* %A) {
+define <8 x float> @test_int_x86_vbcstnebf162ps256(ptr %A) {
; X64-LABEL: test_int_x86_vbcstnebf162ps256:
; X64: # %bb.0:
; X64-NEXT: vbcstnebf162ps (%rdi), %ymm0 # encoding: [0xc4,0xe2,0x7e,0xb1,0x07]
@@ -29,12 +29,12 @@ define <8 x float> @test_int_x86_vbcstnebf162ps256(i8* %A) {
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-NEXT: vbcstnebf162ps (%eax), %ymm0 # encoding: [0xc4,0xe2,0x7e,0xb1,0x00]
; X86-NEXT: retl # encoding: [0xc3]
- %ret = call <8 x float> @llvm.x86.vbcstnebf162ps256(i8* %A)
+ %ret = call <8 x float> @llvm.x86.vbcstnebf162ps256(ptr %A)
ret <8 x float> %ret
}
-declare <8 x float> @llvm.x86.vbcstnebf162ps256(i8* %A)
+declare <8 x float> @llvm.x86.vbcstnebf162ps256(ptr %A)
-define <4 x float> @test_int_x86_vbcstnesh2ps128(i8* %A) {
+define <4 x float> @test_int_x86_vbcstnesh2ps128(ptr %A) {
; X64-LABEL: test_int_x86_vbcstnesh2ps128:
; X64: # %bb.0:
; X64-NEXT: vbcstnesh2ps (%rdi), %xmm0 # encoding: [0xc4,0xe2,0x79,0xb1,0x07]
@@ -45,12 +45,12 @@ define <4 x float> @test_int_x86_vbcstnesh2ps128(i8* %A) {
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-NEXT: vbcstnesh2ps (%eax), %xmm0 # encoding: [0xc4,0xe2,0x79,0xb1,0x00]
; X86-NEXT: retl # encoding: [0xc3]
- %ret = call <4 x float> @llvm.x86.vbcstnesh2ps128(i8* %A)
+ %ret = call <4 x float> @llvm.x86.vbcstnesh2ps128(ptr %A)
ret <4 x float> %ret
}
-declare <4 x float> @llvm.x86.vbcstnesh2ps128(i8* %A)
+declare <4 x float> @llvm.x86.vbcstnesh2ps128(ptr %A)
-define <8 x float> @test_int_x86_vbcstnesh2ps256(i8* %A) {
+define <8 x float> @test_int_x86_vbcstnesh2ps256(ptr %A) {
; X64-LABEL: test_int_x86_vbcstnesh2ps256:
; X64: # %bb.0:
; X64-NEXT: vbcstnesh2ps (%rdi), %ymm0 # encoding: [0xc4,0xe2,0x7d,0xb1,0x07]
@@ -61,12 +61,12 @@ define <8 x float> @test_int_x86_vbcstnesh2ps256(i8* %A) {
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-NEXT: vbcstnesh2ps (%eax), %ymm0 # encoding: [0xc4,0xe2,0x7d,0xb1,0x00]
; X86-NEXT: retl # encoding: [0xc3]
- %ret = call <8 x float> @llvm.x86.vbcstnesh2ps256(i8* %A)
+ %ret = call <8 x float> @llvm.x86.vbcstnesh2ps256(ptr %A)
ret <8 x float> %ret
}
-declare <8 x float> @llvm.x86.vbcstnesh2ps256(i8* %A)
+declare <8 x float> @llvm.x86.vbcstnesh2ps256(ptr %A)
-define <4 x float> @test_int_x86_vcvtneebf162ps128(i8* %A) {
+define <4 x float> @test_int_x86_vcvtneebf162ps128(ptr %A) {
; X64-LABEL: test_int_x86_vcvtneebf162ps128:
; X64: # %bb.0:
; X64-NEXT: vcvtneebf162ps (%rdi), %xmm0 # encoding: [0xc4,0xe2,0x7a,0xb0,0x07]
@@ -77,12 +77,12 @@ define <4 x float> @test_int_x86_vcvtneebf162ps128(i8* %A) {
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-NEXT: vcvtneebf162ps (%eax), %xmm0 # encoding: [0xc4,0xe2,0x7a,0xb0,0x00]
; X86-NEXT: retl # encoding: [0xc3]
- %ret = call <4 x float> @llvm.x86.vcvtneebf162ps128(i8* %A)
+ %ret = call <4 x float> @llvm.x86.vcvtneebf162ps128(ptr %A)
ret <4 x float> %ret
}
-declare <4 x float> @llvm.x86.vcvtneebf162ps128(i8* %A)
+declare <4 x float> @llvm.x86.vcvtneebf162ps128(ptr %A)
-define <8 x float> @test_int_x86_vcvtneebf162ps256(i8* %A) {
+define <8 x float> @test_int_x86_vcvtneebf162ps256(ptr %A) {
; X64-LABEL: test_int_x86_vcvtneebf162ps256:
; X64: # %bb.0:
; X64-NEXT: vcvtneebf162ps (%rdi), %ymm0 # encoding: [0xc4,0xe2,0x7e,0xb0,0x07]
@@ -93,12 +93,12 @@ define <8 x float> @test_int_x86_vcvtneebf162ps256(i8* %A) {
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-NEXT: vcvtneebf162ps (%eax), %ymm0 # encoding: [0xc4,0xe2,0x7e,0xb0,0x00]
; X86-NEXT: retl # encoding: [0xc3]
- %ret = call <8 x float> @llvm.x86.vcvtneebf162ps256(i8* %A)
+ %ret = call <8 x float> @llvm.x86.vcvtneebf162ps256(ptr %A)
ret <8 x float> %ret
}
-declare <8 x float> @llvm.x86.vcvtneebf162ps256(i8* %A)
+declare <8 x float> @llvm.x86.vcvtneebf162ps256(ptr %A)
-define <4 x float> @test_int_x86_vcvtneeph2ps128(i8* %A) {
+define <4 x float> @test_int_x86_vcvtneeph2ps128(ptr %A) {
; X64-LABEL: test_int_x86_vcvtneeph2ps128:
; X64: # %bb.0:
; X64-NEXT: vcvtneeph2ps (%rdi), %xmm0 # encoding: [0xc4,0xe2,0x79,0xb0,0x07]
@@ -109,12 +109,12 @@ define <4 x float> @test_int_x86_vcvtneeph2ps128(i8* %A) {
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-NEXT: vcvtneeph2ps (%eax), %xmm0 # encoding: [0xc4,0xe2,0x79,0xb0,0x00]
; X86-NEXT: retl # encoding: [0xc3]
- %ret = call <4 x float> @llvm.x86.vcvtneeph2ps128(i8* %A)
+ %ret = call <4 x float> @llvm.x86.vcvtneeph2ps128(ptr %A)
ret <4 x float> %ret
}
-declare <4 x float> @llvm.x86.vcvtneeph2ps128(i8* %A)
+declare <4 x float> @llvm.x86.vcvtneeph2ps128(ptr %A)
-define <8 x float> @test_int_x86_vcvtneeph2ps256(i8* %A) {
+define <8 x float> @test_int_x86_vcvtneeph2ps256(ptr %A) {
; X64-LABEL: test_int_x86_vcvtneeph2ps256:
; X64: # %bb.0:
; X64-NEXT: vcvtneeph2ps (%rdi), %ymm0 # encoding: [0xc4,0xe2,0x7d,0xb0,0x07]
@@ -125,12 +125,12 @@ define <8 x float> @test_int_x86_vcvtneeph2ps256(i8* %A) {
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-NEXT: vcvtneeph2ps (%eax), %ymm0 # encoding: [0xc4,0xe2,0x7d,0xb0,0x00]
; X86-NEXT: retl # encoding: [0xc3]
- %ret = call <8 x float> @llvm.x86.vcvtneeph2ps256(i8* %A)
+ %ret = call <8 x float> @llvm.x86.vcvtneeph2ps256(ptr %A)
ret <8 x float> %ret
}
-declare <8 x float> @llvm.x86.vcvtneeph2ps256(i8* %A)
+declare <8 x float> @llvm.x86.vcvtneeph2ps256(ptr %A)
-define <4 x float> @test_int_x86_vcvtneobf162ps128(i8* %A) {
+define <4 x float> @test_int_x86_vcvtneobf162ps128(ptr %A) {
; X64-LABEL: test_int_x86_vcvtneobf162ps128:
; X64: # %bb.0:
; X64-NEXT: vcvtneobf162ps (%rdi), %xmm0 # encoding: [0xc4,0xe2,0x7b,0xb0,0x07]
@@ -141,12 +141,12 @@ define <4 x float> @test_int_x86_vcvtneobf162ps128(i8* %A) {
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-NEXT: vcvtneobf162ps (%eax), %xmm0 # encoding: [0xc4,0xe2,0x7b,0xb0,0x00]
; X86-NEXT: retl # encoding: [0xc3]
- %ret = call <4 x float> @llvm.x86.vcvtneobf162ps128(i8* %A)
+ %ret = call <4 x float> @llvm.x86.vcvtneobf162ps128(ptr %A)
ret <4 x float> %ret
}
-declare <4 x float> @llvm.x86.vcvtneobf162ps128(i8* %A)
+declare <4 x float> @llvm.x86.vcvtneobf162ps128(ptr %A)
-define <8 x float> @test_int_x86_vcvtneobf162ps256(i8* %A) {
+define <8 x float> @test_int_x86_vcvtneobf162ps256(ptr %A) {
; X64-LABEL: test_int_x86_vcvtneobf162ps256:
; X64: # %bb.0:
; X64-NEXT: vcvtneobf162ps (%rdi), %ymm0 # encoding: [0xc4,0xe2,0x7f,0xb0,0x07]
@@ -157,12 +157,12 @@ define <8 x float> @test_int_x86_vcvtneobf162ps256(i8* %A) {
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-NEXT: vcvtneobf162ps (%eax), %ymm0 # encoding: [0xc4,0xe2,0x7f,0xb0,0x00]
; X86-NEXT: retl # encoding: [0xc3]
- %ret = call <8 x float> @llvm.x86.vcvtneobf162ps256(i8* %A)
+ %ret = call <8 x float> @llvm.x86.vcvtneobf162ps256(ptr %A)
ret <8 x float> %ret
}
-declare <8 x float> @llvm.x86.vcvtneobf162ps256(i8* %A)
+declare <8 x float> @llvm.x86.vcvtneobf162ps256(ptr %A)
-define <4 x float> @test_int_x86_vcvtneoph2ps128(i8* %A) {
+define <4 x float> @test_int_x86_vcvtneoph2ps128(ptr %A) {
; X64-LABEL: test_int_x86_vcvtneoph2ps128:
; X64: # %bb.0:
; X64-NEXT: vcvtneoph2ps (%rdi), %xmm0 # encoding: [0xc4,0xe2,0x78,0xb0,0x07]
@@ -173,12 +173,12 @@ define <4 x float> @test_int_x86_vcvtneoph2ps128(i8* %A) {
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-NEXT: vcvtneoph2ps (%eax), %xmm0 # encoding: [0xc4,0xe2,0x78,0xb0,0x00]
; X86-NEXT: retl # encoding: [0xc3]
- %ret = call <4 x float> @llvm.x86.vcvtneoph2ps128(i8* %A)
+ %ret = call <4 x float> @llvm.x86.vcvtneoph2ps128(ptr %A)
ret <4 x float> %ret
}
-declare <4 x float> @llvm.x86.vcvtneoph2ps128(i8* %A)
+declare <4 x float> @llvm.x86.vcvtneoph2ps128(ptr %A)
-define <8 x float> @test_int_x86_vcvtneoph2ps256(i8* %A) {
+define <8 x float> @test_int_x86_vcvtneoph2ps256(ptr %A) {
; X64-LABEL: test_int_x86_vcvtneoph2ps256:
; X64: # %bb.0:
; X64-NEXT: vcvtneoph2ps (%rdi), %ymm0 # encoding: [0xc4,0xe2,0x7c,0xb0,0x07]
@@ -189,10 +189,10 @@ define <8 x float> @test_int_x86_vcvtneoph2ps256(i8* %A) {
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-NEXT: vcvtneoph2ps (%eax), %ymm0 # encoding: [0xc4,0xe2,0x7c,0xb0,0x00]
; X86-NEXT: retl # encoding: [0xc3]
- %ret = call <8 x float> @llvm.x86.vcvtneoph2ps256(i8* %A)
+ %ret = call <8 x float> @llvm.x86.vcvtneoph2ps256(ptr %A)
ret <8 x float> %ret
}
-declare <8 x float> @llvm.x86.vcvtneoph2ps256(i8* %A)
+declare <8 x float> @llvm.x86.vcvtneoph2ps256(ptr %A)
define <8 x bfloat> @test_int_x86_vcvtneps2bf16128(<4 x float> %A) {
; CHECK-LABEL: test_int_x86_vcvtneps2bf16128:
diff --git a/llvm/test/CodeGen/X86/avxvnniint8-intrinsics.ll b/llvm/test/CodeGen/X86/avxvnniint8-intrinsics.ll
index 5c17079519116..f9e44ac4132be 100644
--- a/llvm/test/CodeGen/X86/avxvnniint8-intrinsics.ll
+++ b/llvm/test/CodeGen/X86/avxvnniint8-intrinsics.ll
@@ -5,7 +5,7 @@
declare <4 x i32> @llvm.x86.avx2.vpdpbssd.128(<4 x i32>, <4 x i32>, <4 x i32>)
-define <4 x i32>@test_int_x86_avx2_vpdpbssd_128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32>* %x2p, <4 x i32> %x4) {
+define <4 x i32>@test_int_x86_avx2_vpdpbssd_128(<4 x i32> %x0, <4 x i32> %x1, ptr %x2p, <4 x i32> %x4) {
; X86-LABEL: test_int_x86_avx2_vpdpbssd_128:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
@@ -22,7 +22,7 @@ define <4 x i32>@test_int_x86_avx2_vpdpbssd_128(<4 x i32> %x0, <4 x i32> %x1, <4
; X64-NEXT: vpdpbssd %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x73,0x50,0xc2]
; X64-NEXT: vpaddd %xmm0, %xmm3, %xmm0 # encoding: [0xc5,0xe1,0xfe,0xc0]
; X64-NEXT: retq # encoding: [0xc3]
- %x2 = load <4 x i32>, <4 x i32>* %x2p
+ %x2 = load <4 x i32>, ptr %x2p
%1 = call <4 x i32> @llvm.x86.avx2.vpdpbssd.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2)
%2 = call <4 x i32> @llvm.x86.avx2.vpdpbssd.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x4)
%res = add <4 x i32> %1, %2
@@ -31,7 +31,7 @@ define <4 x i32>@test_int_x86_avx2_vpdpbssd_128(<4 x i32> %x0, <4 x i32> %x1, <4
declare <4 x i32> @llvm.x86.avx2.vpdpbssds.128(<4 x i32>, <4 x i32>, <4 x i32>)
-define <4 x i32>@test_int_x86_avx2_vpdpbssds_128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32>* %x2p, <4 x i32> %x4) {
+define <4 x i32>@test_int_x86_avx2_vpdpbssds_128(<4 x i32> %x0, <4 x i32> %x1, ptr %x2p, <4 x i32> %x4) {
; X86-LABEL: test_int_x86_avx2_vpdpbssds_128:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
@@ -48,7 +48,7 @@ define <4 x i32>@test_int_x86_avx2_vpdpbssds_128(<4 x i32> %x0, <4 x i32> %x1, <
; X64-NEXT: vpdpbssds %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x73,0x51,0xc2]
; X64-NEXT: vpaddd %xmm0, %xmm3, %xmm0 # encoding: [0xc5,0xe1,0xfe,0xc0]
; X64-NEXT: retq # encoding: [0xc3]
- %x2 = load <4 x i32>, <4 x i32>* %x2p
+ %x2 = load <4 x i32>, ptr %x2p
%1 = call <4 x i32> @llvm.x86.avx2.vpdpbssds.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2)
%2 = call <4 x i32> @llvm.x86.avx2.vpdpbssds.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x4)
%res = add <4 x i32> %1, %2
@@ -57,7 +57,7 @@ define <4 x i32>@test_int_x86_avx2_vpdpbssds_128(<4 x i32> %x0, <4 x i32> %x1, <
declare <8 x i32> @llvm.x86.avx2.vpdpbssd.256(<8 x i32>, <8 x i32>, <8 x i32>)
-define <8 x i32>@test_int_x86_avx2_vpdpbssd_256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32>* %x2p, <8 x i32> %x4) {
+define <8 x i32>@test_int_x86_avx2_vpdpbssd_256(<8 x i32> %x0, <8 x i32> %x1, ptr %x2p, <8 x i32> %x4) {
; X86-LABEL: test_int_x86_avx2_vpdpbssd_256:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
@@ -74,7 +74,7 @@ define <8 x i32>@test_int_x86_avx2_vpdpbssd_256(<8 x i32> %x0, <8 x i32> %x1, <8
; X64-NEXT: vpdpbssd %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x77,0x50,0xc2]
; X64-NEXT: vpaddd %ymm0, %ymm3, %ymm0 # encoding: [0xc5,0xe5,0xfe,0xc0]
; X64-NEXT: retq # encoding: [0xc3]
- %x2 = load <8 x i32>, <8 x i32>* %x2p
+ %x2 = load <8 x i32>, ptr %x2p
%1 = call <8 x i32> @llvm.x86.avx2.vpdpbssd.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2)
%2 = call <8 x i32> @llvm.x86.avx2.vpdpbssd.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x4)
%res = add <8 x i32> %1, %2
@@ -83,7 +83,7 @@ define <8 x i32>@test_int_x86_avx2_vpdpbssd_256(<8 x i32> %x0, <8 x i32> %x1, <8
declare <8 x i32> @llvm.x86.avx2.vpdpbssds.256(<8 x i32>, <8 x i32>, <8 x i32>)
-define <8 x i32>@test_int_x86_avx2_vpdpbssds_256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32>* %x2p, <8 x i32> %x4) {
+define <8 x i32>@test_int_x86_avx2_vpdpbssds_256(<8 x i32> %x0, <8 x i32> %x1, ptr %x2p, <8 x i32> %x4) {
; X86-LABEL: test_int_x86_avx2_vpdpbssds_256:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
@@ -100,7 +100,7 @@ define <8 x i32>@test_int_x86_avx2_vpdpbssds_256(<8 x i32> %x0, <8 x i32> %x1, <
; X64-NEXT: vpdpbssds %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x77,0x51,0xc2]
; X64-NEXT: vpaddd %ymm0, %ymm3, %ymm0 # encoding: [0xc5,0xe5,0xfe,0xc0]
; X64-NEXT: retq # encoding: [0xc3]
- %x2 = load <8 x i32>, <8 x i32>* %x2p
+ %x2 = load <8 x i32>, ptr %x2p
%1 = call <8 x i32> @llvm.x86.avx2.vpdpbssds.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2)
%2 = call <8 x i32> @llvm.x86.avx2.vpdpbssds.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x4)
%res = add <8 x i32> %1, %2
@@ -109,7 +109,7 @@ define <8 x i32>@test_int_x86_avx2_vpdpbssds_256(<8 x i32> %x0, <8 x i32> %x1, <
declare <4 x i32> @llvm.x86.avx2.vpdpbsud.128(<4 x i32>, <4 x i32>, <4 x i32>)
-define <4 x i32>@test_int_x86_avx2_vpdpbsud_128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32>* %x2p, <4 x i32> %x4) {
+define <4 x i32>@test_int_x86_avx2_vpdpbsud_128(<4 x i32> %x0, <4 x i32> %x1, ptr %x2p, <4 x i32> %x4) {
; X86-LABEL: test_int_x86_avx2_vpdpbsud_128:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
@@ -126,7 +126,7 @@ define <4 x i32>@test_int_x86_avx2_vpdpbsud_128(<4 x i32> %x0, <4 x i32> %x1, <4
; X64-NEXT: vpdpbsud %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x72,0x50,0xc2]
; X64-NEXT: vpaddd %xmm0, %xmm3, %xmm0 # encoding: [0xc5,0xe1,0xfe,0xc0]
; X64-NEXT: retq # encoding: [0xc3]
- %x2 = load <4 x i32>, <4 x i32>* %x2p
+ %x2 = load <4 x i32>, ptr %x2p
%1 = call <4 x i32> @llvm.x86.avx2.vpdpbsud.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2)
%2 = call <4 x i32> @llvm.x86.avx2.vpdpbsud.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x4)
%res = add <4 x i32> %1, %2
@@ -135,7 +135,7 @@ define <4 x i32>@test_int_x86_avx2_vpdpbsud_128(<4 x i32> %x0, <4 x i32> %x1, <4
declare <4 x i32> @llvm.x86.avx2.vpdpbsuds.128(<4 x i32>, <4 x i32>, <4 x i32>)
-define <4 x i32>@test_int_x86_avx2_vpdpbsuds_128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32>* %x2p, <4 x i32> %x4) {
+define <4 x i32>@test_int_x86_avx2_vpdpbsuds_128(<4 x i32> %x0, <4 x i32> %x1, ptr %x2p, <4 x i32> %x4) {
; X86-LABEL: test_int_x86_avx2_vpdpbsuds_128:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
@@ -152,7 +152,7 @@ define <4 x i32>@test_int_x86_avx2_vpdpbsuds_128(<4 x i32> %x0, <4 x i32> %x1, <
; X64-NEXT: vpdpbsuds %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x72,0x51,0xc2]
; X64-NEXT: vpaddd %xmm0, %xmm3, %xmm0 # encoding: [0xc5,0xe1,0xfe,0xc0]
; X64-NEXT: retq # encoding: [0xc3]
- %x2 = load <4 x i32>, <4 x i32>* %x2p
+ %x2 = load <4 x i32>, ptr %x2p
%1 = call <4 x i32> @llvm.x86.avx2.vpdpbsuds.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2)
%2 = call <4 x i32> @llvm.x86.avx2.vpdpbsuds.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x4)
%res = add <4 x i32> %1, %2
@@ -161,7 +161,7 @@ define <4 x i32>@test_int_x86_avx2_vpdpbsuds_128(<4 x i32> %x0, <4 x i32> %x1, <
declare <8 x i32> @llvm.x86.avx2.vpdpbsud.256(<8 x i32>, <8 x i32>, <8 x i32>)
-define <8 x i32>@test_int_x86_avx2_vpdpbsud_256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32>* %x2p, <8 x i32> %x4) {
+define <8 x i32>@test_int_x86_avx2_vpdpbsud_256(<8 x i32> %x0, <8 x i32> %x1, ptr %x2p, <8 x i32> %x4) {
; X86-LABEL: test_int_x86_avx2_vpdpbsud_256:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
@@ -178,7 +178,7 @@ define <8 x i32>@test_int_x86_avx2_vpdpbsud_256(<8 x i32> %x0, <8 x i32> %x1, <8
; X64-NEXT: vpdpbsud %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x76,0x50,0xc2]
; X64-NEXT: vpaddd %ymm0, %ymm3, %ymm0 # encoding: [0xc5,0xe5,0xfe,0xc0]
; X64-NEXT: retq # encoding: [0xc3]
- %x2 = load <8 x i32>, <8 x i32>* %x2p
+ %x2 = load <8 x i32>, ptr %x2p
%1 = call <8 x i32> @llvm.x86.avx2.vpdpbsud.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2)
%2 = call <8 x i32> @llvm.x86.avx2.vpdpbsud.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x4)
%res = add <8 x i32> %1, %2
@@ -187,7 +187,7 @@ define <8 x i32>@test_int_x86_avx2_vpdpbsud_256(<8 x i32> %x0, <8 x i32> %x1, <8
declare <8 x i32> @llvm.x86.avx2.vpdpbsuds.256(<8 x i32>, <8 x i32>, <8 x i32>)
-define <8 x i32>@test_int_x86_avx2_vpdpbsuds_256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32>* %x2p, <8 x i32> %x4) {
+define <8 x i32>@test_int_x86_avx2_vpdpbsuds_256(<8 x i32> %x0, <8 x i32> %x1, ptr %x2p, <8 x i32> %x4) {
; X86-LABEL: test_int_x86_avx2_vpdpbsuds_256:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
@@ -204,7 +204,7 @@ define <8 x i32>@test_int_x86_avx2_vpdpbsuds_256(<8 x i32> %x0, <8 x i32> %x1, <
; X64-NEXT: vpdpbsuds %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x76,0x51,0xc2]
; X64-NEXT: vpaddd %ymm0, %ymm3, %ymm0 # encoding: [0xc5,0xe5,0xfe,0xc0]
; X64-NEXT: retq # encoding: [0xc3]
- %x2 = load <8 x i32>, <8 x i32>* %x2p
+ %x2 = load <8 x i32>, ptr %x2p
%1 = call <8 x i32> @llvm.x86.avx2.vpdpbsuds.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2)
%2 = call <8 x i32> @llvm.x86.avx2.vpdpbsuds.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x4)
%res = add <8 x i32> %1, %2
@@ -213,7 +213,7 @@ define <8 x i32>@test_int_x86_avx2_vpdpbsuds_256(<8 x i32> %x0, <8 x i32> %x1, <
declare <4 x i32> @llvm.x86.avx2.vpdpbuud.128(<4 x i32>, <4 x i32>, <4 x i32>)
-define <4 x i32>@test_int_x86_avx2_vpdpbuud_128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32>* %x2p, <4 x i32> %x4) {
+define <4 x i32>@test_int_x86_avx2_vpdpbuud_128(<4 x i32> %x0, <4 x i32> %x1, ptr %x2p, <4 x i32> %x4) {
; X86-LABEL: test_int_x86_avx2_vpdpbuud_128:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
@@ -230,7 +230,7 @@ define <4 x i32>@test_int_x86_avx2_vpdpbuud_128(<4 x i32> %x0, <4 x i32> %x1, <4
; X64-NEXT: vpdpbuud %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x70,0x50,0xc2]
; X64-NEXT: vpaddd %xmm0, %xmm3, %xmm0 # encoding: [0xc5,0xe1,0xfe,0xc0]
; X64-NEXT: retq # encoding: [0xc3]
- %x2 = load <4 x i32>, <4 x i32>* %x2p
+ %x2 = load <4 x i32>, ptr %x2p
%1 = call <4 x i32> @llvm.x86.avx2.vpdpbuud.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2)
%2 = call <4 x i32> @llvm.x86.avx2.vpdpbuud.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x4)
%res = add <4 x i32> %1, %2
@@ -239,7 +239,7 @@ define <4 x i32>@test_int_x86_avx2_vpdpbuud_128(<4 x i32> %x0, <4 x i32> %x1, <4
declare <4 x i32> @llvm.x86.avx2.vpdpbuuds.128(<4 x i32>, <4 x i32>, <4 x i32>)
-define <4 x i32>@test_int_x86_avx2_vpdpbuuds_128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32>* %x2p, <4 x i32> %x4) {
+define <4 x i32>@test_int_x86_avx2_vpdpbuuds_128(<4 x i32> %x0, <4 x i32> %x1, ptr %x2p, <4 x i32> %x4) {
; X86-LABEL: test_int_x86_avx2_vpdpbuuds_128:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
@@ -256,7 +256,7 @@ define <4 x i32>@test_int_x86_avx2_vpdpbuuds_128(<4 x i32> %x0, <4 x i32> %x1, <
; X64-NEXT: vpdpbuuds %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x70,0x51,0xc2]
; X64-NEXT: vpaddd %xmm0, %xmm3, %xmm0 # encoding: [0xc5,0xe1,0xfe,0xc0]
; X64-NEXT: retq # encoding: [0xc3]
- %x2 = load <4 x i32>, <4 x i32>* %x2p
+ %x2 = load <4 x i32>, ptr %x2p
%1 = call <4 x i32> @llvm.x86.avx2.vpdpbuuds.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2)
%2 = call <4 x i32> @llvm.x86.avx2.vpdpbuuds.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x4)
%res = add <4 x i32> %1, %2
@@ -265,7 +265,7 @@ define <4 x i32>@test_int_x86_avx2_vpdpbuuds_128(<4 x i32> %x0, <4 x i32> %x1, <
declare <8 x i32> @llvm.x86.avx2.vpdpbuud.256(<8 x i32>, <8 x i32>, <8 x i32>)
-define <8 x i32>@test_int_x86_avx2_vpdpbuud_256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32>* %x2p, <8 x i32> %x4) {
+define <8 x i32>@test_int_x86_avx2_vpdpbuud_256(<8 x i32> %x0, <8 x i32> %x1, ptr %x2p, <8 x i32> %x4) {
; X86-LABEL: test_int_x86_avx2_vpdpbuud_256:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
@@ -282,7 +282,7 @@ define <8 x i32>@test_int_x86_avx2_vpdpbuud_256(<8 x i32> %x0, <8 x i32> %x1, <8
; X64-NEXT: vpdpbuud %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x74,0x50,0xc2]
; X64-NEXT: vpaddd %ymm0, %ymm3, %ymm0 # encoding: [0xc5,0xe5,0xfe,0xc0]
; X64-NEXT: retq # encoding: [0xc3]
- %x2 = load <8 x i32>, <8 x i32>* %x2p
+ %x2 = load <8 x i32>, ptr %x2p
%1 = call <8 x i32> @llvm.x86.avx2.vpdpbuud.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2)
%2 = call <8 x i32> @llvm.x86.avx2.vpdpbuud.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x4)
%res = add <8 x i32> %1, %2
@@ -291,7 +291,7 @@ define <8 x i32>@test_int_x86_avx2_vpdpbuud_256(<8 x i32> %x0, <8 x i32> %x1, <8
declare <8 x i32> @llvm.x86.avx2.vpdpbuuds.256(<8 x i32>, <8 x i32>, <8 x i32>)
-define <8 x i32>@test_int_x86_avx2_vpdpbuuds_256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32>* %x2p, <8 x i32> %x4) {
+define <8 x i32>@test_int_x86_avx2_vpdpbuuds_256(<8 x i32> %x0, <8 x i32> %x1, ptr %x2p, <8 x i32> %x4) {
; X86-LABEL: test_int_x86_avx2_vpdpbuuds_256:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
@@ -308,7 +308,7 @@ define <8 x i32>@test_int_x86_avx2_vpdpbuuds_256(<8 x i32> %x0, <8 x i32> %x1, <
; X64-NEXT: vpdpbuuds %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x74,0x51,0xc2]
; X64-NEXT: vpaddd %ymm0, %ymm3, %ymm0 # encoding: [0xc5,0xe5,0xfe,0xc0]
; X64-NEXT: retq # encoding: [0xc3]
- %x2 = load <8 x i32>, <8 x i32>* %x2p
+ %x2 = load <8 x i32>, ptr %x2p
%1 = call <8 x i32> @llvm.x86.avx2.vpdpbuuds.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2)
%2 = call <8 x i32> @llvm.x86.avx2.vpdpbuuds.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x4)
%res = add <8 x i32> %1, %2
diff --git a/llvm/test/CodeGen/X86/cfguard-checks-funclet.ll b/llvm/test/CodeGen/X86/cfguard-checks-funclet.ll
index 8cf148f3d66ae..4466882dbfe55 100644
--- a/llvm/test/CodeGen/X86/cfguard-checks-funclet.ll
+++ b/llvm/test/CodeGen/X86/cfguard-checks-funclet.ll
@@ -10,25 +10,25 @@
declare i32 @target_func()
-%eh.ThrowInfo = type { i32, i8*, i8*, i8* }
+%eh.ThrowInfo = type { i32, ptr, ptr, ptr }
declare i32 @__CxxFrameHandler3(...)
-declare void @_CxxThrowException(i8*, %eh.ThrowInfo*)
+declare void @_CxxThrowException(ptr, ptr)
-define i32 @func_cf_exception() personality i32 (...)* @__CxxFrameHandler3 {
+define i32 @func_cf_exception() personality ptr @__CxxFrameHandler3 {
entry:
- %func_ptr = alloca i32 ()*, align 8
- store i32 ()* @target_func, i32 ()** %func_ptr, align 8
- invoke void @_CxxThrowException(i8* null, %eh.ThrowInfo* null) #11
+ %func_ptr = alloca ptr, align 8
+ store ptr @target_func, ptr %func_ptr, align 8
+ invoke void @_CxxThrowException(ptr null, ptr null) #11
to label %unreachable unwind label %ehcleanup
ehcleanup:
%0 = cleanuppad within none []
- %isnull = icmp eq i32 ()** %func_ptr, null
+ %isnull = icmp eq ptr %func_ptr, null
br i1 %isnull, label %exit, label %callfn
callfn:
- %1 = load i32 ()*, i32 ()** %func_ptr, align 8
+ %1 = load ptr, ptr %func_ptr, align 8
%2 = call i32 %1() #9 [ "funclet"(token %0) ]
br label %exit
@@ -42,7 +42,7 @@ catch.dispatch:
%3 = catchswitch within none [label %catch] unwind to caller
catch:
- %4 = catchpad within %3 [i8* null, i32 64, i8* null]
+ %4 = catchpad within %3 [ptr null, i32 64, ptr null]
catchret from %4 to label %try.cont
try.cont:
diff --git a/llvm/test/CodeGen/X86/cmpccxadd-intrinsics.ll b/llvm/test/CodeGen/X86/cmpccxadd-intrinsics.ll
index c9b4b1ac10bd0..b888fb5b1ff69 100644
--- a/llvm/test/CodeGen/X86/cmpccxadd-intrinsics.ll
+++ b/llvm/test/CodeGen/X86/cmpccxadd-intrinsics.ll
@@ -1,358 +1,358 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -verify-machineinstrs -mtriple=x86_64-unknown-unknown --show-mc-encoding -mattr=+cmpccxadd | FileCheck %s
-define dso_local i32 @test_cmpbexadd32(i8* %__A, i32 %__B, i32 %__C) nounwind {
+define dso_local i32 @test_cmpbexadd32(ptr %__A, i32 %__B, i32 %__C) nounwind {
; CHECK-LABEL: test_cmpbexadd32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movl %esi, %eax # encoding: [0x89,0xf0]
; CHECK-NEXT: cmpoxadd %edx, %eax, (%rdi) # encoding: [0xc4,0xe2,0x69,0xe0,0x07]
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
- %0 = tail call i32 @llvm.x86.cmpccxadd32(i8* %__A, i32 %__B, i32 %__C, i32 0)
+ %0 = tail call i32 @llvm.x86.cmpccxadd32(ptr %__A, i32 %__B, i32 %__C, i32 0)
ret i32 %0
}
-declare i32 @llvm.x86.cmpccxadd32(i8*, i32, i32, i32 immarg)
+declare i32 @llvm.x86.cmpccxadd32(ptr, i32, i32, i32 immarg)
-define dso_local i64 @test_cmpbexadd64(i8* %__A, i64 %__B, i64 %__C) nounwind {
+define dso_local i64 @test_cmpbexadd64(ptr %__A, i64 %__B, i64 %__C) nounwind {
; CHECK-LABEL: test_cmpbexadd64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movq %rsi, %rax # encoding: [0x48,0x89,0xf0]
; CHECK-NEXT: cmpoxadd %rdx, %rax, (%rdi) # encoding: [0xc4,0xe2,0xe9,0xe0,0x07]
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
- %0 = tail call i64 @llvm.x86.cmpccxadd64(i8* %__A, i64 %__B, i64 %__C, i32 0)
+ %0 = tail call i64 @llvm.x86.cmpccxadd64(ptr %__A, i64 %__B, i64 %__C, i32 0)
ret i64 %0
}
-declare i64 @llvm.x86.cmpccxadd64(i8*, i64, i64, i32 immarg)
+declare i64 @llvm.x86.cmpccxadd64(ptr, i64, i64, i32 immarg)
-define dso_local i32 @test_cmpbxadd32(i8* %__A, i32 %__B, i32 %__C) nounwind {
+define dso_local i32 @test_cmpbxadd32(ptr %__A, i32 %__B, i32 %__C) nounwind {
; CHECK-LABEL: test_cmpbxadd32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movl %esi, %eax # encoding: [0x89,0xf0]
; CHECK-NEXT: cmpnoxadd %edx, %eax, (%rdi) # encoding: [0xc4,0xe2,0x69,0xe1,0x07]
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
- %0 = tail call i32 @llvm.x86.cmpccxadd32(i8* %__A, i32 %__B, i32 %__C, i32 1)
+ %0 = tail call i32 @llvm.x86.cmpccxadd32(ptr %__A, i32 %__B, i32 %__C, i32 1)
ret i32 %0
}
-define dso_local i64 @test_cmpbxadd64(i8* %__A, i64 %__B, i64 %__C) nounwind {
+define dso_local i64 @test_cmpbxadd64(ptr %__A, i64 %__B, i64 %__C) nounwind {
; CHECK-LABEL: test_cmpbxadd64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movq %rsi, %rax # encoding: [0x48,0x89,0xf0]
; CHECK-NEXT: cmpnoxadd %rdx, %rax, (%rdi) # encoding: [0xc4,0xe2,0xe9,0xe1,0x07]
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
- %0 = tail call i64 @llvm.x86.cmpccxadd64(i8* %__A, i64 %__B, i64 %__C, i32 1)
+ %0 = tail call i64 @llvm.x86.cmpccxadd64(ptr %__A, i64 %__B, i64 %__C, i32 1)
ret i64 %0
}
-define dso_local i32 @test_cmplexadd32(i8* %__A, i32 %__B, i32 %__C) nounwind {
+define dso_local i32 @test_cmplexadd32(ptr %__A, i32 %__B, i32 %__C) nounwind {
; CHECK-LABEL: test_cmplexadd32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movl %esi, %eax # encoding: [0x89,0xf0]
; CHECK-NEXT: cmpbxadd %edx, %eax, (%rdi) # encoding: [0xc4,0xe2,0x69,0xe2,0x07]
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
- %0 = tail call i32 @llvm.x86.cmpccxadd32(i8* %__A, i32 %__B, i32 %__C, i32 2)
+ %0 = tail call i32 @llvm.x86.cmpccxadd32(ptr %__A, i32 %__B, i32 %__C, i32 2)
ret i32 %0
}
-define dso_local i64 @test_cmplexadd64(i8* %__A, i64 %__B, i64 %__C) nounwind {
+define dso_local i64 @test_cmplexadd64(ptr %__A, i64 %__B, i64 %__C) nounwind {
; CHECK-LABEL: test_cmplexadd64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movq %rsi, %rax # encoding: [0x48,0x89,0xf0]
; CHECK-NEXT: cmpbxadd %rdx, %rax, (%rdi) # encoding: [0xc4,0xe2,0xe9,0xe2,0x07]
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
- %0 = tail call i64 @llvm.x86.cmpccxadd64(i8* %__A, i64 %__B, i64 %__C, i32 2)
+ %0 = tail call i64 @llvm.x86.cmpccxadd64(ptr %__A, i64 %__B, i64 %__C, i32 2)
ret i64 %0
}
-define dso_local i32 @test_cmplxadd32(i8* %__A, i32 %__B, i32 %__C) nounwind {
+define dso_local i32 @test_cmplxadd32(ptr %__A, i32 %__B, i32 %__C) nounwind {
; CHECK-LABEL: test_cmplxadd32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movl %esi, %eax # encoding: [0x89,0xf0]
; CHECK-NEXT: cmpnbxadd %edx, %eax, (%rdi) # encoding: [0xc4,0xe2,0x69,0xe3,0x07]
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
- %0 = tail call i32 @llvm.x86.cmpccxadd32(i8* %__A, i32 %__B, i32 %__C, i32 3)
+ %0 = tail call i32 @llvm.x86.cmpccxadd32(ptr %__A, i32 %__B, i32 %__C, i32 3)
ret i32 %0
}
-define dso_local i64 @test_cmplxadd64(i8* %__A, i64 %__B, i64 %__C) nounwind {
+define dso_local i64 @test_cmplxadd64(ptr %__A, i64 %__B, i64 %__C) nounwind {
; CHECK-LABEL: test_cmplxadd64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movq %rsi, %rax # encoding: [0x48,0x89,0xf0]
; CHECK-NEXT: cmpnbxadd %rdx, %rax, (%rdi) # encoding: [0xc4,0xe2,0xe9,0xe3,0x07]
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
- %0 = tail call i64 @llvm.x86.cmpccxadd64(i8* %__A, i64 %__B, i64 %__C, i32 3)
+ %0 = tail call i64 @llvm.x86.cmpccxadd64(ptr %__A, i64 %__B, i64 %__C, i32 3)
ret i64 %0
}
-define dso_local i32 @test_cmpnbexadd32(i8* %__A, i32 %__B, i32 %__C) nounwind {
+define dso_local i32 @test_cmpnbexadd32(ptr %__A, i32 %__B, i32 %__C) nounwind {
; CHECK-LABEL: test_cmpnbexadd32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movl %esi, %eax # encoding: [0x89,0xf0]
; CHECK-NEXT: cmpzxadd %edx, %eax, (%rdi) # encoding: [0xc4,0xe2,0x69,0xe4,0x07]
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
- %0 = tail call i32 @llvm.x86.cmpccxadd32(i8* %__A, i32 %__B, i32 %__C, i32 4)
+ %0 = tail call i32 @llvm.x86.cmpccxadd32(ptr %__A, i32 %__B, i32 %__C, i32 4)
ret i32 %0
}
-define dso_local i64 @test_cmpnbexadd64(i8* %__A, i64 %__B, i64 %__C) nounwind {
+define dso_local i64 @test_cmpnbexadd64(ptr %__A, i64 %__B, i64 %__C) nounwind {
; CHECK-LABEL: test_cmpnbexadd64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movq %rsi, %rax # encoding: [0x48,0x89,0xf0]
; CHECK-NEXT: cmpzxadd %rdx, %rax, (%rdi) # encoding: [0xc4,0xe2,0xe9,0xe4,0x07]
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
- %0 = tail call i64 @llvm.x86.cmpccxadd64(i8* %__A, i64 %__B, i64 %__C, i32 4)
+ %0 = tail call i64 @llvm.x86.cmpccxadd64(ptr %__A, i64 %__B, i64 %__C, i32 4)
ret i64 %0
}
-define dso_local i32 @test_cmpnbxadd32(i8* %__A, i32 %__B, i32 %__C) nounwind {
+define dso_local i32 @test_cmpnbxadd32(ptr %__A, i32 %__B, i32 %__C) nounwind {
; CHECK-LABEL: test_cmpnbxadd32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movl %esi, %eax # encoding: [0x89,0xf0]
; CHECK-NEXT: cmpnzxadd %edx, %eax, (%rdi) # encoding: [0xc4,0xe2,0x69,0xe5,0x07]
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
- %0 = tail call i32 @llvm.x86.cmpccxadd32(i8* %__A, i32 %__B, i32 %__C, i32 5)
+ %0 = tail call i32 @llvm.x86.cmpccxadd32(ptr %__A, i32 %__B, i32 %__C, i32 5)
ret i32 %0
}
-define dso_local i64 @test_cmpnbxadd64(i8* %__A, i64 %__B, i64 %__C) nounwind {
+define dso_local i64 @test_cmpnbxadd64(ptr %__A, i64 %__B, i64 %__C) nounwind {
; CHECK-LABEL: test_cmpnbxadd64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movq %rsi, %rax # encoding: [0x48,0x89,0xf0]
; CHECK-NEXT: cmpnzxadd %rdx, %rax, (%rdi) # encoding: [0xc4,0xe2,0xe9,0xe5,0x07]
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
- %0 = tail call i64 @llvm.x86.cmpccxadd64(i8* %__A, i64 %__B, i64 %__C, i32 5)
+ %0 = tail call i64 @llvm.x86.cmpccxadd64(ptr %__A, i64 %__B, i64 %__C, i32 5)
ret i64 %0
}
-define dso_local i32 @test_cmpnlexadd32(i8* %__A, i32 %__B, i32 %__C) nounwind {
+define dso_local i32 @test_cmpnlexadd32(ptr %__A, i32 %__B, i32 %__C) nounwind {
; CHECK-LABEL: test_cmpnlexadd32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movl %esi, %eax # encoding: [0x89,0xf0]
; CHECK-NEXT: cmpbexadd %edx, %eax, (%rdi) # encoding: [0xc4,0xe2,0x69,0xe6,0x07]
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
- %0 = tail call i32 @llvm.x86.cmpccxadd32(i8* %__A, i32 %__B, i32 %__C, i32 6)
+ %0 = tail call i32 @llvm.x86.cmpccxadd32(ptr %__A, i32 %__B, i32 %__C, i32 6)
ret i32 %0
}
-define dso_local i64 @test_cmpnlexadd64(i8* %__A, i64 %__B, i64 %__C) nounwind {
+define dso_local i64 @test_cmpnlexadd64(ptr %__A, i64 %__B, i64 %__C) nounwind {
; CHECK-LABEL: test_cmpnlexadd64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movq %rsi, %rax # encoding: [0x48,0x89,0xf0]
; CHECK-NEXT: cmpbexadd %rdx, %rax, (%rdi) # encoding: [0xc4,0xe2,0xe9,0xe6,0x07]
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
- %0 = tail call i64 @llvm.x86.cmpccxadd64(i8* %__A, i64 %__B, i64 %__C, i32 6)
+ %0 = tail call i64 @llvm.x86.cmpccxadd64(ptr %__A, i64 %__B, i64 %__C, i32 6)
ret i64 %0
}
-define dso_local i32 @test_cmpnlxadd32(i8* %__A, i32 %__B, i32 %__C) nounwind {
+define dso_local i32 @test_cmpnlxadd32(ptr %__A, i32 %__B, i32 %__C) nounwind {
; CHECK-LABEL: test_cmpnlxadd32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movl %esi, %eax # encoding: [0x89,0xf0]
; CHECK-NEXT: cmpnbexadd %edx, %eax, (%rdi) # encoding: [0xc4,0xe2,0x69,0xe7,0x07]
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
- %0 = tail call i32 @llvm.x86.cmpccxadd32(i8* %__A, i32 %__B, i32 %__C, i32 7)
+ %0 = tail call i32 @llvm.x86.cmpccxadd32(ptr %__A, i32 %__B, i32 %__C, i32 7)
ret i32 %0
}
-define dso_local i64 @test_cmpnlxadd64(i8* %__A, i64 %__B, i64 %__C) nounwind {
+define dso_local i64 @test_cmpnlxadd64(ptr %__A, i64 %__B, i64 %__C) nounwind {
; CHECK-LABEL: test_cmpnlxadd64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movq %rsi, %rax # encoding: [0x48,0x89,0xf0]
; CHECK-NEXT: cmpnbexadd %rdx, %rax, (%rdi) # encoding: [0xc4,0xe2,0xe9,0xe7,0x07]
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
- %0 = tail call i64 @llvm.x86.cmpccxadd64(i8* %__A, i64 %__B, i64 %__C, i32 7)
+ %0 = tail call i64 @llvm.x86.cmpccxadd64(ptr %__A, i64 %__B, i64 %__C, i32 7)
ret i64 %0
}
-define dso_local i32 @test_cmpnoxadd32(i8* %__A, i32 %__B, i32 %__C) nounwind {
+define dso_local i32 @test_cmpnoxadd32(ptr %__A, i32 %__B, i32 %__C) nounwind {
; CHECK-LABEL: test_cmpnoxadd32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movl %esi, %eax # encoding: [0x89,0xf0]
; CHECK-NEXT: cmpsxadd %edx, %eax, (%rdi) # encoding: [0xc4,0xe2,0x69,0xe8,0x07]
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
- %0 = tail call i32 @llvm.x86.cmpccxadd32(i8* %__A, i32 %__B, i32 %__C, i32 8)
+ %0 = tail call i32 @llvm.x86.cmpccxadd32(ptr %__A, i32 %__B, i32 %__C, i32 8)
ret i32 %0
}
-define dso_local i64 @test_cmpnoxadd64(i8* %__A, i64 %__B, i64 %__C) nounwind {
+define dso_local i64 @test_cmpnoxadd64(ptr %__A, i64 %__B, i64 %__C) nounwind {
; CHECK-LABEL: test_cmpnoxadd64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movq %rsi, %rax # encoding: [0x48,0x89,0xf0]
; CHECK-NEXT: cmpsxadd %rdx, %rax, (%rdi) # encoding: [0xc4,0xe2,0xe9,0xe8,0x07]
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
- %0 = tail call i64 @llvm.x86.cmpccxadd64(i8* %__A, i64 %__B, i64 %__C, i32 8)
+ %0 = tail call i64 @llvm.x86.cmpccxadd64(ptr %__A, i64 %__B, i64 %__C, i32 8)
ret i64 %0
}
-define dso_local i32 @test_cmpnpxadd32(i8* %__A, i32 %__B, i32 %__C) nounwind {
+define dso_local i32 @test_cmpnpxadd32(ptr %__A, i32 %__B, i32 %__C) nounwind {
; CHECK-LABEL: test_cmpnpxadd32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movl %esi, %eax # encoding: [0x89,0xf0]
; CHECK-NEXT: cmpnsxadd %edx, %eax, (%rdi) # encoding: [0xc4,0xe2,0x69,0xe9,0x07]
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
- %0 = tail call i32 @llvm.x86.cmpccxadd32(i8* %__A, i32 %__B, i32 %__C, i32 9)
+ %0 = tail call i32 @llvm.x86.cmpccxadd32(ptr %__A, i32 %__B, i32 %__C, i32 9)
ret i32 %0
}
-define dso_local i64 @test_cmpnpxadd64(i8* %__A, i64 %__B, i64 %__C) nounwind {
+define dso_local i64 @test_cmpnpxadd64(ptr %__A, i64 %__B, i64 %__C) nounwind {
; CHECK-LABEL: test_cmpnpxadd64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movq %rsi, %rax # encoding: [0x48,0x89,0xf0]
; CHECK-NEXT: cmpnsxadd %rdx, %rax, (%rdi) # encoding: [0xc4,0xe2,0xe9,0xe9,0x07]
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
- %0 = tail call i64 @llvm.x86.cmpccxadd64(i8* %__A, i64 %__B, i64 %__C, i32 9)
+ %0 = tail call i64 @llvm.x86.cmpccxadd64(ptr %__A, i64 %__B, i64 %__C, i32 9)
ret i64 %0
}
-define dso_local i32 @test_cmpnsxadd32(i8* %__A, i32 %__B, i32 %__C) nounwind {
+define dso_local i32 @test_cmpnsxadd32(ptr %__A, i32 %__B, i32 %__C) nounwind {
; CHECK-LABEL: test_cmpnsxadd32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movl %esi, %eax # encoding: [0x89,0xf0]
; CHECK-NEXT: cmppxadd %edx, %eax, (%rdi) # encoding: [0xc4,0xe2,0x69,0xea,0x07]
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
- %0 = tail call i32 @llvm.x86.cmpccxadd32(i8* %__A, i32 %__B, i32 %__C, i32 10)
+ %0 = tail call i32 @llvm.x86.cmpccxadd32(ptr %__A, i32 %__B, i32 %__C, i32 10)
ret i32 %0
}
-define dso_local i64 @test_cmpnsxadd64(i8* %__A, i64 %__B, i64 %__C) nounwind {
+define dso_local i64 @test_cmpnsxadd64(ptr %__A, i64 %__B, i64 %__C) nounwind {
; CHECK-LABEL: test_cmpnsxadd64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movq %rsi, %rax # encoding: [0x48,0x89,0xf0]
; CHECK-NEXT: cmppxadd %rdx, %rax, (%rdi) # encoding: [0xc4,0xe2,0xe9,0xea,0x07]
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
- %0 = tail call i64 @llvm.x86.cmpccxadd64(i8* %__A, i64 %__B, i64 %__C, i32 10)
+ %0 = tail call i64 @llvm.x86.cmpccxadd64(ptr %__A, i64 %__B, i64 %__C, i32 10)
ret i64 %0
}
-define dso_local i32 @test_cmpnzxadd32(i8* %__A, i32 %__B, i32 %__C) nounwind {
+define dso_local i32 @test_cmpnzxadd32(ptr %__A, i32 %__B, i32 %__C) nounwind {
; CHECK-LABEL: test_cmpnzxadd32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movl %esi, %eax # encoding: [0x89,0xf0]
; CHECK-NEXT: cmpnpxadd %edx, %eax, (%rdi) # encoding: [0xc4,0xe2,0x69,0xeb,0x07]
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
- %0 = tail call i32 @llvm.x86.cmpccxadd32(i8* %__A, i32 %__B, i32 %__C, i32 11)
+ %0 = tail call i32 @llvm.x86.cmpccxadd32(ptr %__A, i32 %__B, i32 %__C, i32 11)
ret i32 %0
}
-define dso_local i64 @test_cmpnzxadd64(i8* %__A, i64 %__B, i64 %__C) nounwind {
+define dso_local i64 @test_cmpnzxadd64(ptr %__A, i64 %__B, i64 %__C) nounwind {
; CHECK-LABEL: test_cmpnzxadd64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movq %rsi, %rax # encoding: [0x48,0x89,0xf0]
; CHECK-NEXT: cmpnpxadd %rdx, %rax, (%rdi) # encoding: [0xc4,0xe2,0xe9,0xeb,0x07]
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
- %0 = tail call i64 @llvm.x86.cmpccxadd64(i8* %__A, i64 %__B, i64 %__C, i32 11)
+ %0 = tail call i64 @llvm.x86.cmpccxadd64(ptr %__A, i64 %__B, i64 %__C, i32 11)
ret i64 %0
}
-define dso_local i32 @test_cmpoxadd32(i8* %__A, i32 %__B, i32 %__C) nounwind {
+define dso_local i32 @test_cmpoxadd32(ptr %__A, i32 %__B, i32 %__C) nounwind {
; CHECK-LABEL: test_cmpoxadd32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movl %esi, %eax # encoding: [0x89,0xf0]
; CHECK-NEXT: cmplxadd %edx, %eax, (%rdi) # encoding: [0xc4,0xe2,0x69,0xec,0x07]
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
- %0 = tail call i32 @llvm.x86.cmpccxadd32(i8* %__A, i32 %__B, i32 %__C, i32 12)
+ %0 = tail call i32 @llvm.x86.cmpccxadd32(ptr %__A, i32 %__B, i32 %__C, i32 12)
ret i32 %0
}
-define dso_local i64 @test_cmpoxadd64(i8* %__A, i64 %__B, i64 %__C) nounwind {
+define dso_local i64 @test_cmpoxadd64(ptr %__A, i64 %__B, i64 %__C) nounwind {
; CHECK-LABEL: test_cmpoxadd64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movq %rsi, %rax # encoding: [0x48,0x89,0xf0]
; CHECK-NEXT: cmplxadd %rdx, %rax, (%rdi) # encoding: [0xc4,0xe2,0xe9,0xec,0x07]
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
- %0 = tail call i64 @llvm.x86.cmpccxadd64(i8* %__A, i64 %__B, i64 %__C, i32 12)
+ %0 = tail call i64 @llvm.x86.cmpccxadd64(ptr %__A, i64 %__B, i64 %__C, i32 12)
ret i64 %0
}
-define dso_local i32 @test_cmppxadd32(i8* %__A, i32 %__B, i32 %__C) nounwind {
+define dso_local i32 @test_cmppxadd32(ptr %__A, i32 %__B, i32 %__C) nounwind {
; CHECK-LABEL: test_cmppxadd32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movl %esi, %eax # encoding: [0x89,0xf0]
; CHECK-NEXT: cmpnlxadd %edx, %eax, (%rdi) # encoding: [0xc4,0xe2,0x69,0xed,0x07]
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
- %0 = tail call i32 @llvm.x86.cmpccxadd32(i8* %__A, i32 %__B, i32 %__C, i32 13)
+ %0 = tail call i32 @llvm.x86.cmpccxadd32(ptr %__A, i32 %__B, i32 %__C, i32 13)
ret i32 %0
}
-define dso_local i64 @test_cmppxadd64(i8* %__A, i64 %__B, i64 %__C) nounwind {
+define dso_local i64 @test_cmppxadd64(ptr %__A, i64 %__B, i64 %__C) nounwind {
; CHECK-LABEL: test_cmppxadd64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movq %rsi, %rax # encoding: [0x48,0x89,0xf0]
; CHECK-NEXT: cmpnlxadd %rdx, %rax, (%rdi) # encoding: [0xc4,0xe2,0xe9,0xed,0x07]
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
- %0 = tail call i64 @llvm.x86.cmpccxadd64(i8* %__A, i64 %__B, i64 %__C, i32 13)
+ %0 = tail call i64 @llvm.x86.cmpccxadd64(ptr %__A, i64 %__B, i64 %__C, i32 13)
ret i64 %0
}
-define dso_local i32 @test_cmpsxadd32(i8* %__A, i32 %__B, i32 %__C) nounwind {
+define dso_local i32 @test_cmpsxadd32(ptr %__A, i32 %__B, i32 %__C) nounwind {
; CHECK-LABEL: test_cmpsxadd32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movl %esi, %eax # encoding: [0x89,0xf0]
; CHECK-NEXT: cmplexadd %edx, %eax, (%rdi) # encoding: [0xc4,0xe2,0x69,0xee,0x07]
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
- %0 = tail call i32 @llvm.x86.cmpccxadd32(i8* %__A, i32 %__B, i32 %__C, i32 14)
+ %0 = tail call i32 @llvm.x86.cmpccxadd32(ptr %__A, i32 %__B, i32 %__C, i32 14)
ret i32 %0
}
-define dso_local i64 @test_cmpsxadd64(i8* %__A, i64 %__B, i64 %__C) nounwind {
+define dso_local i64 @test_cmpsxadd64(ptr %__A, i64 %__B, i64 %__C) nounwind {
; CHECK-LABEL: test_cmpsxadd64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movq %rsi, %rax # encoding: [0x48,0x89,0xf0]
; CHECK-NEXT: cmplexadd %rdx, %rax, (%rdi) # encoding: [0xc4,0xe2,0xe9,0xee,0x07]
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
- %0 = tail call i64 @llvm.x86.cmpccxadd64(i8* %__A, i64 %__B, i64 %__C, i32 14)
+ %0 = tail call i64 @llvm.x86.cmpccxadd64(ptr %__A, i64 %__B, i64 %__C, i32 14)
ret i64 %0
}
-define dso_local i32 @test_cmpzxadd32(i8* %__A, i32 %__B, i32 %__C) nounwind {
+define dso_local i32 @test_cmpzxadd32(ptr %__A, i32 %__B, i32 %__C) nounwind {
; CHECK-LABEL: test_cmpzxadd32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movl %esi, %eax # encoding: [0x89,0xf0]
; CHECK-NEXT: cmpnlexadd %edx, %eax, (%rdi) # encoding: [0xc4,0xe2,0x69,0xef,0x07]
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
- %0 = tail call i32 @llvm.x86.cmpccxadd32(i8* %__A, i32 %__B, i32 %__C, i32 15)
+ %0 = tail call i32 @llvm.x86.cmpccxadd32(ptr %__A, i32 %__B, i32 %__C, i32 15)
ret i32 %0
}
-define dso_local i64 @test_cmpzxadd64(i8* %__A, i64 %__B, i64 %__C) nounwind {
+define dso_local i64 @test_cmpzxadd64(ptr %__A, i64 %__B, i64 %__C) nounwind {
; CHECK-LABEL: test_cmpzxadd64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movq %rsi, %rax # encoding: [0x48,0x89,0xf0]
; CHECK-NEXT: cmpnlexadd %rdx, %rax, (%rdi) # encoding: [0xc4,0xe2,0xe9,0xef,0x07]
; CHECK-NEXT: retq # encoding: [0xc3]
entry:
- %0 = tail call i64 @llvm.x86.cmpccxadd64(i8* %__A, i64 %__B, i64 %__C, i32 15)
+ %0 = tail call i64 @llvm.x86.cmpccxadd64(ptr %__A, i64 %__B, i64 %__C, i32 15)
ret i64 %0
}
diff --git a/llvm/test/CodeGen/X86/compress-undef-float-passthrough.ll b/llvm/test/CodeGen/X86/compress-undef-float-passthrough.ll
index 5d3eafdce4b55..47331db7261b3 100644
--- a/llvm/test/CodeGen/X86/compress-undef-float-passthrough.ll
+++ b/llvm/test/CodeGen/X86/compress-undef-float-passthrough.ll
@@ -13,7 +13,7 @@ define void @test_compress_undef_float_passthrough() {
entry: ; preds = %loop.50
%0 = bitcast i4 undef to <4 x i1>
%1 = call <4 x double> @llvm.x86.avx512.mask.compress.v4f64(<4 x double> undef, <4 x double> undef, <4 x i1> <i1 1, i1 0, i1 1, i1 0>)
- call void @llvm.masked.scatter.v4f64.v4p0f64(<4 x double> %1, <4 x double*> undef, i32 0, <4 x i1> %0)
+ call void @llvm.masked.scatter.v4f64.v4p0(<4 x double> %1, <4 x ptr> undef, i32 0, <4 x i1> %0)
ret void
}
@@ -21,5 +21,5 @@ entry: ; preds = %loop.50
declare <4 x double> @llvm.x86.avx512.mask.compress.v4f64(<4 x double>, <4 x double>, <4 x i1>)
; Function Attrs: nocallback nofree nosync nounwind willreturn writeonly
-declare void @llvm.masked.scatter.v4f64.v4p0f64(<4 x double>, <4 x double*>, i32 immarg, <4 x i1>)
+declare void @llvm.masked.scatter.v4f64.v4p0(<4 x double>, <4 x ptr>, i32 immarg, <4 x i1>)
diff --git a/llvm/test/CodeGen/X86/dllexport-x86_64.ll b/llvm/test/CodeGen/X86/dllexport-x86_64.ll
index 6205cffc4ba95..e6a6aa7567835 100644
--- a/llvm/test/CodeGen/X86/dllexport-x86_64.ll
+++ b/llvm/test/CodeGen/X86/dllexport-x86_64.ll
@@ -105,32 +105,32 @@ define weak_odr dllexport void @weak1() {
; CHECK: .globl alias
; CHECK: .set alias, notExported
-@alias = dllexport alias void(), void()* @notExported
+@alias = dllexport alias void(), ptr @notExported
; CHECK: .globl aliasNotExported
; CHECK: .set aliasNotExported, f1
-@aliasNotExported = alias void(), void()* @f1
+@aliasNotExported = alias void(), ptr @f1
; CHECK: .globl alias2
; CHECK: .set alias2, f1
-@alias2 = dllexport alias void(), void()* @f1
+@alias2 = dllexport alias void(), ptr @f1
; CHECK: .globl alias3
; CHECK: .set alias3, notExported
-@alias3 = dllexport alias void(), void()* @notExported
+@alias3 = dllexport alias void(), ptr @notExported
; CHECK: .weak weak_alias
; CHECK: .set weak_alias, f1
-@weak_alias = weak_odr dllexport alias void(), void()* @f1
+@weak_alias = weak_odr dllexport alias void(), ptr @f1
@blob = global [6 x i8] c"\B8*\00\00\00\C3", section ".text", align 16
-@blob_alias = dllexport alias i32 (), bitcast ([6 x i8]* @blob to i32 ()*)
+@blob_alias = dllexport alias i32 (), ptr @blob
@exportedButNotDefinedVariable = external dllexport global i32
declare dllexport void @exportedButNotDefinedFunction()
define void @foo() {
entry:
- store i32 4, i32* @exportedButNotDefinedVariable, align 4
+ store i32 4, ptr @exportedButNotDefinedVariable, align 4
call void @exportedButNotDefinedFunction()
ret void
}
diff --git a/llvm/test/CodeGen/X86/expand-large-div-rem-sdiv129.ll b/llvm/test/CodeGen/X86/expand-large-div-rem-sdiv129.ll
index 4b6d438fe6d08..2b592fdbf0d29 100644
--- a/llvm/test/CodeGen/X86/expand-large-div-rem-sdiv129.ll
+++ b/llvm/test/CodeGen/X86/expand-large-div-rem-sdiv129.ll
@@ -1,10 +1,10 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -S -mtriple=x86_64-- -expand-large-div-rem -expand-div-rem-bits 128 < %s | FileCheck %s
-define void @sdiv129(i129* %ptr, i129* %out) nounwind {
+define void @sdiv129(ptr %ptr, ptr %out) nounwind {
; CHECK-LABEL: @sdiv129(
; CHECK-NEXT: _udiv-special-cases:
-; CHECK-NEXT: [[A:%.*]] = load i129, i129* [[PTR:%.*]], align 4
+; CHECK-NEXT: [[A:%.*]] = load i129, ptr [[PTR:%.*]], align 4
; CHECK-NEXT: [[TMP0:%.*]] = freeze i129 [[A]]
; CHECK-NEXT: [[TMP1:%.*]] = freeze i129 3
; CHECK-NEXT: [[TMP2:%.*]] = ashr i129 [[TMP0]], 128
@@ -66,11 +66,11 @@ define void @sdiv129(i129* %ptr, i129* %out) nounwind {
; CHECK-NEXT: [[TMP48:%.*]] = phi i129 [ [[TMP25]], [[UDIV_LOOP_EXIT]] ], [ [[TMP20]], [[_UDIV_SPECIAL_CASES:%.*]] ]
; CHECK-NEXT: [[TMP49:%.*]] = xor i129 [[TMP48]], [[TMP8]]
; CHECK-NEXT: [[TMP50:%.*]] = sub i129 [[TMP49]], [[TMP8]]
-; CHECK-NEXT: store i129 [[TMP50]], i129* [[OUT:%.*]], align 4
+; CHECK-NEXT: store i129 [[TMP50]], ptr [[OUT:%.*]], align 4
; CHECK-NEXT: ret void
;
- %a = load i129, i129* %ptr
+ %a = load i129, ptr %ptr
%res = sdiv i129 %a, 3
- store i129 %res, i129* %out
+ store i129 %res, ptr %out
ret void
}
diff --git a/llvm/test/CodeGen/X86/expand-large-div-rem-srem129.ll b/llvm/test/CodeGen/X86/expand-large-div-rem-srem129.ll
index 89fb554003c46..f0f410def9641 100644
--- a/llvm/test/CodeGen/X86/expand-large-div-rem-srem129.ll
+++ b/llvm/test/CodeGen/X86/expand-large-div-rem-srem129.ll
@@ -1,10 +1,10 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -S -mtriple=x86_64-- -expand-large-div-rem -expand-div-rem-bits 128 < %s | FileCheck %s
-define void @test(i129* %ptr, i129* %out) nounwind {
+define void @test(ptr %ptr, ptr %out) nounwind {
; CHECK-LABEL: @test(
; CHECK-NEXT: _udiv-special-cases:
-; CHECK-NEXT: [[A:%.*]] = load i129, i129* [[PTR:%.*]], align 4
+; CHECK-NEXT: [[A:%.*]] = load i129, ptr [[PTR:%.*]], align 4
; CHECK-NEXT: [[TMP0:%.*]] = freeze i129 [[A]]
; CHECK-NEXT: [[TMP1:%.*]] = freeze i129 3
; CHECK-NEXT: [[TMP2:%.*]] = ashr i129 [[TMP0]], 128
@@ -69,11 +69,11 @@ define void @test(i129* %ptr, i129* %out) nounwind {
; CHECK-NEXT: [[TMP51:%.*]] = sub i129 [[TMP8]], [[TMP50]]
; CHECK-NEXT: [[TMP52:%.*]] = xor i129 [[TMP51]], [[TMP2]]
; CHECK-NEXT: [[TMP53:%.*]] = sub i129 [[TMP52]], [[TMP2]]
-; CHECK-NEXT: store i129 [[TMP53]], i129* [[OUT:%.*]], align 4
+; CHECK-NEXT: store i129 [[TMP53]], ptr [[OUT:%.*]], align 4
; CHECK-NEXT: ret void
;
- %a = load i129, i129* %ptr
+ %a = load i129, ptr %ptr
%res = srem i129 %a, 3
- store i129 %res, i129* %out
+ store i129 %res, ptr %out
ret void
}
diff --git a/llvm/test/CodeGen/X86/expand-large-div-rem-udiv129.ll b/llvm/test/CodeGen/X86/expand-large-div-rem-udiv129.ll
index d6b8fed33f95d..79b527ec4f989 100644
--- a/llvm/test/CodeGen/X86/expand-large-div-rem-udiv129.ll
+++ b/llvm/test/CodeGen/X86/expand-large-div-rem-udiv129.ll
@@ -1,10 +1,10 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -S -mtriple=x86_64-- -expand-large-div-rem -expand-div-rem-bits 128 < %s | FileCheck %s
-define void @test(i129* %ptr, i129* %out) nounwind {
+define void @test(ptr %ptr, ptr %out) nounwind {
; CHECK-LABEL: @test(
; CHECK-NEXT: _udiv-special-cases:
-; CHECK-NEXT: [[A:%.*]] = load i129, i129* [[PTR:%.*]], align 4
+; CHECK-NEXT: [[A:%.*]] = load i129, ptr [[PTR:%.*]], align 4
; CHECK-NEXT: [[TMP0:%.*]] = freeze i129 3
; CHECK-NEXT: [[TMP1:%.*]] = freeze i129 [[A]]
; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i129 [[TMP0]], 0
@@ -55,11 +55,11 @@ define void @test(i129* %ptr, i129* %out) nounwind {
; CHECK-NEXT: br i1 [[TMP38]], label [[UDIV_LOOP_EXIT]], label [[UDIV_PREHEADER]]
; CHECK: udiv-end:
; CHECK-NEXT: [[TMP39:%.*]] = phi i129 [ [[TMP16]], [[UDIV_LOOP_EXIT]] ], [ [[TMP11]], [[_UDIV_SPECIAL_CASES:%.*]] ]
-; CHECK-NEXT: store i129 [[TMP39]], i129* [[OUT:%.*]], align 4
+; CHECK-NEXT: store i129 [[TMP39]], ptr [[OUT:%.*]], align 4
; CHECK-NEXT: ret void
;
- %a = load i129, i129* %ptr
+ %a = load i129, ptr %ptr
%res = udiv i129 %a, 3
- store i129 %res, i129* %out
+ store i129 %res, ptr %out
ret void
}
diff --git a/llvm/test/CodeGen/X86/expand-large-div-rem-urem129.ll b/llvm/test/CodeGen/X86/expand-large-div-rem-urem129.ll
index 087092d186d23..134875b00d717 100644
--- a/llvm/test/CodeGen/X86/expand-large-div-rem-urem129.ll
+++ b/llvm/test/CodeGen/X86/expand-large-div-rem-urem129.ll
@@ -1,10 +1,10 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -S -mtriple=x86_64-- -expand-large-div-rem -expand-div-rem-bits 128 < %s | FileCheck %s
-define void @test(i129* %ptr, i129* %out) nounwind {
+define void @test(ptr %ptr, ptr %out) nounwind {
; CHECK-LABEL: @test(
; CHECK-NEXT: _udiv-special-cases:
-; CHECK-NEXT: [[A:%.*]] = load i129, i129* [[PTR:%.*]], align 4
+; CHECK-NEXT: [[A:%.*]] = load i129, ptr [[PTR:%.*]], align 4
; CHECK-NEXT: [[TMP0:%.*]] = freeze i129 [[A]]
; CHECK-NEXT: [[TMP1:%.*]] = freeze i129 3
; CHECK-NEXT: [[TMP2:%.*]] = freeze i129 [[TMP1]]
@@ -59,11 +59,11 @@ define void @test(i129* %ptr, i129* %out) nounwind {
; CHECK-NEXT: [[TMP41:%.*]] = phi i129 [ [[TMP18]], [[UDIV_LOOP_EXIT]] ], [ [[TMP13]], [[_UDIV_SPECIAL_CASES:%.*]] ]
; CHECK-NEXT: [[TMP42:%.*]] = mul i129 [[TMP1]], [[TMP41]]
; CHECK-NEXT: [[TMP43:%.*]] = sub i129 [[TMP0]], [[TMP42]]
-; CHECK-NEXT: store i129 [[TMP43]], i129* [[OUT:%.*]], align 4
+; CHECK-NEXT: store i129 [[TMP43]], ptr [[OUT:%.*]], align 4
; CHECK-NEXT: ret void
;
- %a = load i129, i129* %ptr
+ %a = load i129, ptr %ptr
%res = urem i129 %a, 3
- store i129 %res, i129* %out
+ store i129 %res, ptr %out
ret void
}
diff --git a/llvm/test/CodeGen/X86/fshl-splat-undef.ll b/llvm/test/CodeGen/X86/fshl-splat-undef.ll
index 586aa9b110c5d..a2d0345077b87 100644
--- a/llvm/test/CodeGen/X86/fshl-splat-undef.ll
+++ b/llvm/test/CodeGen/X86/fshl-splat-undef.ll
@@ -17,7 +17,7 @@
; (We cannot convert t2 to {i64 undef, i64 undef, ...},
; as that would not be equal to the original result.)
;
-define void @test_fshl(<8 x i64> %lo, <8 x i64> %hi, <8 x i64>* %arr) {
+define void @test_fshl(<8 x i64> %lo, <8 x i64> %hi, ptr %arr) {
; CHECK-LABEL: test_fshl:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
@@ -30,7 +30,7 @@ define void @test_fshl(<8 x i64> %lo, <8 x i64> %hi, <8 x i64>* %arr) {
entry:
%fshl = call <8 x i64> @llvm.fshl.v8i64(<8 x i64> %hi, <8 x i64> %lo, <8 x i64> <i64 12, i64 12, i64 12, i64 12, i64 12, i64 12, i64 12, i64 12>)
%res = shufflevector <8 x i64> %fshl, <8 x i64> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 10, i32 11, i32 12, i32 13, i32 6, i32 7>
- store <8 x i64> %res, <8 x i64>* %arr, align 64
+ store <8 x i64> %res, ptr %arr, align 64
ret void
}
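
For reference, the funnel-shift semantics the comment above depends on: for
a constant amount, fshl concatenates the operands and shifts, so every lane
mixes bits from both inputs, which is why the shuffled-in lanes cannot be
treated as undef. A scalar sketch (hypothetical, assuming the amount 12 used
in this test):

define i64 @fshl_expanded(i64 %hi, i64 %lo) {
  %a = shl i64 %hi, 12    ; low 52 bits of %hi land in the top of the result
  %b = lshr i64 %lo, 52   ; top 12 bits of %lo fill the low end
  %r = or i64 %a, %b      ; equals @llvm.fshl.i64(%hi, %lo, 12)
  ret i64 %r
}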
diff --git a/llvm/test/CodeGen/X86/func-sanitizer.ll b/llvm/test/CodeGen/X86/func-sanitizer.ll
index b8d96a346d0c5..9825685b7c307 100644
--- a/llvm/test/CodeGen/X86/func-sanitizer.ll
+++ b/llvm/test/CodeGen/X86/func-sanitizer.ll
@@ -9,10 +9,10 @@
; CHECK: .size .L__llvm_rtti_proxy, 8
@i = linkonce_odr constant i32 1
-@__llvm_rtti_proxy = private unnamed_addr constant i32* @i
+@__llvm_rtti_proxy = private unnamed_addr constant ptr @i
define dso_local void @_Z3funv() !func_sanitize !0 {
ret void
}
-!0 = !{i32 846595819, i32** @__llvm_rtti_proxy}
+!0 = !{i32 846595819, ptr @__llvm_rtti_proxy}
diff --git a/llvm/test/CodeGen/X86/function-alias.ll b/llvm/test/CodeGen/X86/function-alias.ll
index d68d75d5578aa..ddcffa6722ce2 100644
--- a/llvm/test/CodeGen/X86/function-alias.ll
+++ b/llvm/test/CodeGen/X86/function-alias.ll
@@ -6,7 +6,7 @@ target triple = "x86_64-unknown-linux-gnu"
@0 = private constant <{ i8, i8 }> <{i8 15, i8 11}>, section ".text"
; function-typed alias
-@ud2 = alias void (), bitcast (<{ i8, i8 }>* @0 to void ()*)
+@ud2 = alias void (), ptr @0
; Check that "ud2" is emitted as a function symbol.
; CHECK: .type{{.*}}ud2,@function
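
Aliases show the syntactic win most clearly: with typed pointers, an alias
whose value type differed from its aliasee needed a constant bitcast, while
with opaque pointers the aliasee is named directly. A sketch with
hypothetical names:

@bytes = private constant <{ i8, i8 }> <{ i8 15, i8 11 }>, section ".text"
; typed pointers: @fn = alias void (), bitcast (<{ i8, i8 }>* @bytes to void ()*)
@fn = alias void (), ptr @bytes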
diff --git a/llvm/test/CodeGen/X86/masked_compressstore_isel.ll b/llvm/test/CodeGen/X86/masked_compressstore_isel.ll
index db1f9ae7b1f45..1851a21c8c064 100644
--- a/llvm/test/CodeGen/X86/masked_compressstore_isel.ll
+++ b/llvm/test/CodeGen/X86/masked_compressstore_isel.ll
@@ -3,7 +3,7 @@
define void @_Z3fooiPiPs(<8 x i32> %gepload, <8 x i1> %0) #0 {
entry:
%1 = trunc <8 x i32> %gepload to <8 x i16>
- tail call void @llvm.masked.compressstore.v8i16(<8 x i16> %1, i16* null, <8 x i1> %0)
+ tail call void @llvm.masked.compressstore.v8i16(<8 x i16> %1, ptr null, <8 x i1> %0)
ret void
}
@@ -13,11 +13,11 @@ entry:
; CHECK-NEXT: %2:vr128x = VPSLLWZ128ri %1, 15
; CHECK-NEXT: %3:vk16wm = VPMOVW2MZ128rr killed %2
; CHECK-NEXT: %4:vr128x = VPMOVDWZ256rr %0
-; CHECK-NEXT: VPCOMPRESSWZ128mrk $noreg, 1, $noreg, 0, $noreg, killed %3, killed %4 :: (store unknown-size into `i16* null`, align 16)
+; CHECK-NEXT: VPCOMPRESSWZ128mrk $noreg, 1, $noreg, 0, $noreg, killed %3, killed %4 :: (store unknown-size into `ptr null`, align 16)
; CHECK-NEXT: RET 0
; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: write)
-declare void @llvm.masked.compressstore.v8i16(<8 x i16>, i16* nocapture, <8 x i1>) #1
+declare void @llvm.masked.compressstore.v8i16(<8 x i16>, ptr nocapture, <8 x i1>) #1
attributes #0 = { "target-cpu"="icelake-server" }
attributes #1 = { nocallback nofree nosync nounwind willreturn memory(argmem: write) }
diff --git a/llvm/test/CodeGen/X86/no-plt-libcalls.ll b/llvm/test/CodeGen/X86/no-plt-libcalls.ll
index 0e6fed91e19f6..8ccbfe967ef52 100644
--- a/llvm/test/CodeGen/X86/no-plt-libcalls.ll
+++ b/llvm/test/CodeGen/X86/no-plt-libcalls.ll
@@ -4,20 +4,18 @@
@percent_s = constant [4 x i8] c"%s\0A\00"
@hello_world = constant [13 x i8] c"hello world\0A\00"
-declare i32 @printf(i8*, ...)
+declare i32 @printf(ptr, ...)
define void @printf_call() {
; CHECK-LABEL: @printf_call(
-; CHECK-NEXT: [[PUTS:%.*]] = call i32 @puts(i8* nonnull dereferenceable(1) getelementptr inbounds ([13 x i8], [13 x i8]* @hello_world, i64 0, i64 0))
+; CHECK-NEXT: [[PUTS:%.*]] = call i32 @puts(ptr nonnull dereferenceable(1) @hello_world)
; CHECK-NEXT: ret void
;
- %fmt = getelementptr [4 x i8], [4 x i8]* @percent_s, i32 0, i32 0
- %str = getelementptr [13 x i8], [13 x i8]* @hello_world, i32 0, i32 0
- call i32 (i8*, ...) @printf(i8* %fmt, i8* %str)
+ call i32 (ptr, ...) @printf(ptr @percent_s, ptr @hello_world)
ret void
}
; CHECK: Function Attrs: nofree nounwind nonlazybind
-; CHECK-NEXT: declare noundef i32 @puts(i8* nocapture noundef readonly)
+; CHECK-NEXT: declare noundef i32 @puts(ptr nocapture noundef readonly)
!llvm.module.flags = !{!0}
!0 = !{i32 7, !"RtLibUseGOT", i32 1}
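
The deleted %fmt/%str lines illustrate a second effect of the migration:
decaying an [N x i8] global to a pointer no longer needs a zero-index
getelementptr, because the global constant already has type ptr.
Schematically, with the globals from this test:

; typed:  %str = getelementptr [13 x i8], [13 x i8]* @hello_world, i32 0, i32 0
;         call i32 (i8*, ...) @printf(i8* %fmt, i8* %str)
; opaque:
;         call i32 (ptr, ...) @printf(ptr @percent_s, ptr @hello_world)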
diff --git a/llvm/test/CodeGen/X86/pcsections.ll b/llvm/test/CodeGen/X86/pcsections.ll
index 412c0bf777daf..fbcac484e8e9d 100644
--- a/llvm/test/CodeGen/X86/pcsections.ll
+++ b/llvm/test/CodeGen/X86/pcsections.ll
@@ -69,7 +69,7 @@ define i64 @multiple() !pcsections !0 {
; CHECK-NEXT: .long 21264
; CHECK-NEXT: .text
entry:
- %0 = load i64, i64* @bar, align 8, !pcsections !2
+ %0 = load i64, ptr @bar, align 8, !pcsections !2
ret i64 %0
}
@@ -87,8 +87,8 @@ define i64 @test_simple_atomic() {
; LARGE-NEXT: .quad .Lpcsection1-.Lpcsection_base5
; CHECK-NEXT: .text
entry:
- %0 = load atomic i64, i64* @foo monotonic, align 8, !pcsections !0
- %1 = load i64, i64* @bar, align 8
+ %0 = load atomic i64, ptr @foo monotonic, align 8, !pcsections !0
+ %1 = load i64, ptr @bar, align 8
%add = add nsw i64 %1, %0
ret i64 %add
}
@@ -109,10 +109,10 @@ define i64 @test_complex_atomic() {
; LARGE-NEXT: .quad .Lpcsection2-.Lpcsection_base6
; CHECK-NEXT: .text
entry:
- %0 = atomicrmw add i64* @foo, i64 1 monotonic, align 8, !pcsections !0
- %1 = load i64, i64* @bar, align 8
+ %0 = atomicrmw add ptr @foo, i64 1 monotonic, align 8, !pcsections !0
+ %1 = load i64, ptr @bar, align 8
%inc = add nsw i64 %1, 1
- store i64 %inc, i64* @bar, align 8
+ store i64 %inc, ptr @bar, align 8
%add = add nsw i64 %1, %0
ret i64 %add
}
diff --git a/llvm/test/CodeGen/X86/pr35763.ll b/llvm/test/CodeGen/X86/pr35763.ll
index 8b3e91dc577ae..8cc4b1b906785 100644
--- a/llvm/test/CodeGen/X86/pr35763.ll
+++ b/llvm/test/CodeGen/X86/pr35763.ll
@@ -25,18 +25,18 @@ define dso_local void @PR35763() {
; CHECK-NEXT: movb %al, z+10(%rip)
; CHECK-NEXT: retq
entry:
- %0 = load i16, i16* getelementptr inbounds (%struct.S, %struct.S* bitcast ({ i16, i8, i8, i8, i8, i8, i8, i8, i8, i8, [5 x i8] }* @z to %struct.S*), i32 0, i32 0), align 8
+ %0 = load i16, ptr @z, align 8
%conv = sext i16 %0 to i32
- %bf.load = load i32, i32* bitcast (i24* getelementptr inbounds (%struct.S, %struct.S* bitcast ({ i16, i8, i8, i8, i8, i8, i8, i8, i8, i8, [5 x i8] }* @z to %struct.S*), i32 0, i32 1) to i32*), align 2
+ %bf.load = load i32, ptr getelementptr inbounds (%struct.S, ptr @z, i32 0, i32 1), align 2
%bf.clear = and i32 %bf.load, 2097151
%bf.cast = zext i32 %bf.clear to i64
%conv1 = trunc i64 %bf.cast to i32
%or = or i32 %conv, %conv1
%conv2 = trunc i32 %or to i16
%conv3 = zext i16 %conv2 to i64
- store i64 %conv3, i64* @tf_3_var_136, align 8
- %bf.load4 = load i40, i40* bitcast ([5 x i8]* getelementptr inbounds (%struct.S, %struct.S* bitcast ({ i16, i8, i8, i8, i8, i8, i8, i8, i8, i8, [5 x i8] }* @z to %struct.S*), i32 0, i32 2) to i40*), align 2
+ store i64 %conv3, ptr @tf_3_var_136, align 8
+ %bf.load4 = load i40, ptr getelementptr inbounds (%struct.S, ptr @z, i32 0, i32 2), align 2
%bf.clear5 = and i40 %bf.load4, -8589869057
- store i40 %bf.clear5, i40* bitcast ([5 x i8]* getelementptr inbounds (%struct.S, %struct.S* bitcast ({ i16, i8, i8, i8, i8, i8, i8, i8, i8, i8, [5 x i8] }* @z to %struct.S*), i32 0, i32 2) to i40*), align 2
+ store i40 %bf.clear5, ptr getelementptr inbounds (%struct.S, ptr @z, i32 0, i32 2), align 2
ret void
}
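
The same folding applies inside constant expressions: bitcasting @z to
%struct.S* becomes a no-op, and a getelementptr selecting field 0 at offset
zero folds to the global itself, which is why the nested expressions above
collapse. Schematically:

; typed:  load i16, i16* getelementptr inbounds (%struct.S,
;             %struct.S* bitcast ({ i16, i8, ... }* @z to %struct.S*), i32 0, i32 0)
; opaque: load i16, ptr @z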
diff --git a/llvm/test/CodeGen/X86/pr44749.ll b/llvm/test/CodeGen/X86/pr44749.ll
index 43e9a8bef410d..b2c0e17fc6684 100644
--- a/llvm/test/CodeGen/X86/pr44749.ll
+++ b/llvm/test/CodeGen/X86/pr44749.ll
@@ -30,13 +30,13 @@ define i32 @a() {
entry:
%call = call i32 (...) @b()
%conv = sitofp i32 %call to double
- %fsub = fsub double sitofp (i32 select (i1 icmp ne (i8* (i64, i64)* bitcast (i8* getelementptr (i8, i8* bitcast (i8* (i64, i64)* @calloc to i8*), i64 1) to i8* (i64, i64)*), i8* (i64, i64)* null), i32 1, i32 0) to double), 1.000000e+02
+ %fsub = fsub double sitofp (i32 select (i1 icmp ne (ptr getelementptr (i8, ptr @calloc, i64 1), ptr null), i32 1, i32 0) to double), 1.000000e+02
%cmp = fcmp ole double %fsub, %conv
%cond = select i1 %cmp, double 1.000000e+00, double 3.140000e+00
%conv2 = fptosi double %cond to i32
ret i32 %conv2
}
-declare i8* @calloc(i64, i64)
+declare ptr @calloc(i64, i64)
declare i32 @b(...)
diff --git a/llvm/test/CodeGen/X86/pr56351.ll b/llvm/test/CodeGen/X86/pr56351.ll
index d9473b2680086..4ec58f7c1ad1d 100644
--- a/llvm/test/CodeGen/X86/pr56351.ll
+++ b/llvm/test/CodeGen/X86/pr56351.ll
@@ -1,7 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+soft-float | FileCheck %s
-define i1 @foo(i128* %x, i128* %y) {
+define i1 @foo(ptr %x, ptr %y) {
; CHECK-LABEL: foo:
; CHECK: # %bb.0:
; CHECK-NEXT: movq (%rdi), %rax
@@ -11,8 +11,8 @@ define i1 @foo(i128* %x, i128* %y) {
; CHECK-NEXT: orq %rcx, %rax
; CHECK-NEXT: sete %al
; CHECK-NEXT: retq
- %a = load i128, i128* %x, align 16
- %b = load i128, i128* %y, align 16
+ %a = load i128, ptr %x, align 16
+ %b = load i128, ptr %y, align 16
%c = icmp eq i128 %a, %b
ret i1 %c
}
diff --git a/llvm/test/CodeGen/X86/pr57283.ll b/llvm/test/CodeGen/X86/pr57283.ll
index 58e1432cc3294..25e5ac72ab70c 100644
--- a/llvm/test/CodeGen/X86/pr57283.ll
+++ b/llvm/test/CodeGen/X86/pr57283.ll
@@ -25,14 +25,14 @@ define void @PR57283() nounwind {
BB:
%A6 = alloca i64, align 8
%A = alloca i64, align 8
- %L = load i64, i64* %A, align 4
+ %L = load i64, ptr %A, align 4
%B3 = sub i64 %L, %L
%B2 = mul i64 %B3, 4294967296
%B1 = add i64 %B2, %B2
%B4 = udiv i64 %B2, -9223372036854775808
%B = xor i64 %B1, %B4
- store i64 %B, i64* %A, align 4
+ store i64 %B, ptr %A, align 4
%B5 = sdiv i64 %B, -1
- store i64 %B5, i64* %A6, align 4
+ store i64 %B5, ptr %A6, align 4
ret void
}
diff --git a/llvm/test/CodeGen/X86/pr57474.ll b/llvm/test/CodeGen/X86/pr57474.ll
index b12b7ee475861..80f40039eccf0 100644
--- a/llvm/test/CodeGen/X86/pr57474.ll
+++ b/llvm/test/CodeGen/X86/pr57474.ll
@@ -17,7 +17,7 @@ BB:
BB1: ; preds = %BB
%A = alloca <1 x i16>, align 2
- %L1 = load <1 x i16>, <1 x i16>* %A, align 2
+ %L1 = load <1 x i16>, ptr %A, align 2
%I = insertelement <1 x i16> %L1, i16 -1, i16 0
%B6 = add <1 x i16> %I, %I
%B3 = srem <1 x i16> %B6, %I
@@ -26,6 +26,6 @@ BB1: ; preds = %BB
%B4 = udiv <1 x i16> %B3, <i16 -32768>
%B2 = or <1 x i16> %B4, %B5
%B = lshr <1 x i16> <i16 -32768>, %B2
- store <1 x i16> %B, <1 x i16>* %A, align 2
+ store <1 x i16> %B, ptr %A, align 2
ret void
}
diff --git a/llvm/test/CodeGen/X86/pr57673.ll b/llvm/test/CodeGen/X86/pr57673.ll
index 91fd3af4fceb5..5e7102059c0d2 100644
--- a/llvm/test/CodeGen/X86/pr57673.ll
+++ b/llvm/test/CodeGen/X86/pr57673.ll
@@ -13,46 +13,42 @@
target triple = "x86_64-unknown-linux-gnu"
-%t10 = type { i8*, [32 x i8] }
+%t10 = type { ptr, [32 x i8] }
define void @foo() {
bb_entry:
%tmp11 = alloca [0 x [0 x i32]], i32 0, align 4
%i = alloca %t10, align 8
%i1 = alloca %t10, align 8
- %tmp1.sub = getelementptr inbounds [0 x [0 x i32]], [0 x [0 x i32]]* %tmp11, i64 0, i64 0, i64 0
- %i2 = bitcast [0 x [0 x i32]]* %tmp11 to i8*
br label %bb_8
bb_8: ; preds = %bb_last, %bb_entry
br i1 undef, label %bb_last, label %bb_mid
bb_mid: ; preds = %bb_8
- %i3 = bitcast %t10* %i1 to i8*
- %i4 = getelementptr inbounds %t10, %t10* %i1, i64 0, i32 1, i64 32
- %i5 = bitcast %t10* %i to i8*
- %i6 = getelementptr inbounds %t10, %t10* %i, i64 0, i32 1, i64 32
- call void @llvm.lifetime.start.p0i8(i64 0, i8* nonnull %i3)
+ %i4 = getelementptr inbounds %t10, ptr %i1, i64 0, i32 1, i64 32
+ %i6 = getelementptr inbounds %t10, ptr %i, i64 0, i32 1, i64 32
+ call void @llvm.lifetime.start.p0(i64 0, ptr nonnull %i1)
%v21 = call i64 @llvm.ctlz.i64(i64 undef, i1 false)
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* noundef nonnull dereferenceable(16) null, i8* noundef nonnull align 8 dereferenceable(16) %i4, i64 16, i1 false)
- call void @llvm.dbg.value(metadata !DIArgList(i8* %i4, i8* %i4), metadata !4, metadata !DIExpression(DW_OP_LLVM_arg, 0)), !dbg !9
- call void @llvm.lifetime.end.p0i8(i64 0, i8* nonnull %i3)
- call void @llvm.lifetime.start.p0i8(i64 0, i8* nonnull %i5)
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* noundef nonnull dereferenceable(16) null, i8* noundef nonnull align 8 dereferenceable(16) %i6, i64 16, i1 false)
- call void @llvm.lifetime.end.p0i8(i64 0, i8* nonnull %i5)
+ call void @llvm.memcpy.p0.p0.i64(ptr noundef nonnull dereferenceable(16) null, ptr noundef nonnull align 8 dereferenceable(16) %i4, i64 16, i1 false)
+ call void @llvm.dbg.value(metadata !DIArgList(ptr %i4, ptr %i4), metadata !4, metadata !DIExpression(DW_OP_LLVM_arg, 0)), !dbg !9
+ call void @llvm.lifetime.end.p0(i64 0, ptr nonnull %i1)
+ call void @llvm.lifetime.start.p0(i64 0, ptr nonnull %i)
+ call void @llvm.memcpy.p0.p0.i64(ptr noundef nonnull dereferenceable(16) null, ptr noundef nonnull align 8 dereferenceable(16) %i6, i64 16, i1 false)
+ call void @llvm.lifetime.end.p0(i64 0, ptr nonnull %i)
br label %bb_last
bb_last: ; preds = %bb_mid, %bb_8
- call void @llvm.lifetime.start.p0i8(i64 0, i8* nonnull %i2)
- call void undef(i32* null, i32* null, i32* null, i32 0, i32* nonnull %tmp1.sub)
- call void @llvm.lifetime.end.p0i8(i64 0, i8* nonnull %i2)
+ call void @llvm.lifetime.start.p0(i64 0, ptr nonnull %tmp11)
+ call void undef(ptr null, ptr null, ptr null, i32 0, ptr nonnull %tmp11)
+ call void @llvm.lifetime.end.p0(i64 0, ptr nonnull %tmp11)
br label %bb_8
}
declare i64 @llvm.ctlz.i64(i64, i1 immarg)
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* noalias nocapture writeonly, i8* noalias nocapture readonly, i64, i1 immarg)
-declare void @llvm.lifetime.start.p0i8(i64 immarg, i8* nocapture)
-declare void @llvm.lifetime.end.p0i8(i64 immarg, i8* nocapture)
+declare void @llvm.memcpy.p0.p0.i64(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i64, i1 immarg)
+declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
declare void @llvm.dbg.value(metadata, metadata, metadata)
!llvm.dbg.cu = !{!0}
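
Two changes combine in pr57673.ll: the i8* bitcasts of the allocas
disappear, and the pointer-parameter mangling in intrinsic names drops the
pointee type, keeping only the address space (.p0i8 becomes .p0). A minimal
sketch with a hypothetical buffer:

declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)

define void @lifetime_sketch() {
  %buf = alloca [32 x i8]
  ; typed pointers needed: %p = bitcast [32 x i8]* %buf to i8*
  ;                        call void @llvm.lifetime.start.p0i8(i64 32, i8* %p)
  call void @llvm.lifetime.start.p0(i64 32, ptr %buf)
  ret void
}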
diff --git a/llvm/test/CodeGen/X86/raoint-intrinsics-32.ll b/llvm/test/CodeGen/X86/raoint-intrinsics-32.ll
index d9d60fe34112c..9715c8f4c0348 100644
--- a/llvm/test/CodeGen/X86/raoint-intrinsics-32.ll
+++ b/llvm/test/CodeGen/X86/raoint-intrinsics-32.ll
@@ -2,7 +2,7 @@
; RUN: llc < %s -verify-machineinstrs -mtriple=x86_64-unknown-unknown --show-mc-encoding -mattr=+raoint | FileCheck %s --check-prefixes=X64
; RUN: llc < %s -verify-machineinstrs -mtriple=i686-unknown-unknown --show-mc-encoding -mattr=+raoint | FileCheck %s --check-prefixes=X86
-define void @test_int_x86_aadd32(i8* %A, i32 %B) {
+define void @test_int_x86_aadd32(ptr %A, i32 %B) {
; X64-LABEL: test_int_x86_aadd32:
; X64: # %bb.0:
; X64-NEXT: aaddl %esi, (%rdi) # encoding: [0x0f,0x38,0xfc,0x37]
@@ -14,12 +14,12 @@ define void @test_int_x86_aadd32(i8* %A, i32 %B) {
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx # encoding: [0x8b,0x4c,0x24,0x04]
; X86-NEXT: aaddl %eax, (%ecx) # encoding: [0x0f,0x38,0xfc,0x01]
; X86-NEXT: retl # encoding: [0xc3]
- call void @llvm.x86.aadd32(i8* %A, i32 %B)
+ call void @llvm.x86.aadd32(ptr %A, i32 %B)
ret void
}
-declare void @llvm.x86.aadd32(i8* %A, i32 %B)
+declare void @llvm.x86.aadd32(ptr %A, i32 %B)
-define void @test_int_x86_aand32(i8* %A, i32 %B) {
+define void @test_int_x86_aand32(ptr %A, i32 %B) {
; X64-LABEL: test_int_x86_aand32:
; X64: # %bb.0:
; X64-NEXT: aandl %esi, (%rdi) # encoding: [0x66,0x0f,0x38,0xfc,0x37]
@@ -31,12 +31,12 @@ define void @test_int_x86_aand32(i8* %A, i32 %B) {
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx # encoding: [0x8b,0x4c,0x24,0x04]
; X86-NEXT: aandl %eax, (%ecx) # encoding: [0x66,0x0f,0x38,0xfc,0x01]
; X86-NEXT: retl # encoding: [0xc3]
- call void @llvm.x86.aand32(i8* %A, i32 %B)
+ call void @llvm.x86.aand32(ptr %A, i32 %B)
ret void
}
-declare void @llvm.x86.aand32(i8* %A, i32 %B)
+declare void @llvm.x86.aand32(ptr %A, i32 %B)
-define void @test_int_x86_aor32(i8* %A, i32 %B) {
+define void @test_int_x86_aor32(ptr %A, i32 %B) {
; X64-LABEL: test_int_x86_aor32:
; X64: # %bb.0:
; X64-NEXT: aorl %esi, (%rdi) # encoding: [0xf2,0x0f,0x38,0xfc,0x37]
@@ -48,12 +48,12 @@ define void @test_int_x86_aor32(i8* %A, i32 %B) {
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx # encoding: [0x8b,0x4c,0x24,0x04]
; X86-NEXT: aorl %eax, (%ecx) # encoding: [0xf2,0x0f,0x38,0xfc,0x01]
; X86-NEXT: retl # encoding: [0xc3]
- call void @llvm.x86.aor32(i8* %A, i32 %B)
+ call void @llvm.x86.aor32(ptr %A, i32 %B)
ret void
}
-declare void @llvm.x86.aor32(i8* %A, i32 %B)
+declare void @llvm.x86.aor32(ptr %A, i32 %B)
-define void @test_int_x86_axor32(i8* %A, i32 %B) {
+define void @test_int_x86_axor32(ptr %A, i32 %B) {
; X64-LABEL: test_int_x86_axor32:
; X64: # %bb.0:
; X64-NEXT: axorl %esi, (%rdi) # encoding: [0xf3,0x0f,0x38,0xfc,0x37]
@@ -65,7 +65,7 @@ define void @test_int_x86_axor32(i8* %A, i32 %B) {
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx # encoding: [0x8b,0x4c,0x24,0x04]
; X86-NEXT: axorl %eax, (%ecx) # encoding: [0xf3,0x0f,0x38,0xfc,0x01]
; X86-NEXT: retl # encoding: [0xc3]
- call void @llvm.x86.axor32(i8* %A, i32 %B)
+ call void @llvm.x86.axor32(ptr %A, i32 %B)
ret void
}
-declare void @llvm.x86.axor32(i8* %A, i32 %B)
+declare void @llvm.x86.axor32(ptr %A, i32 %B)
diff --git a/llvm/test/CodeGen/X86/raoint-intrinsics-64.ll b/llvm/test/CodeGen/X86/raoint-intrinsics-64.ll
index a77b37c39d3cb..9d4fec591b762 100644
--- a/llvm/test/CodeGen/X86/raoint-intrinsics-64.ll
+++ b/llvm/test/CodeGen/X86/raoint-intrinsics-64.ll
@@ -1,42 +1,42 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -verify-machineinstrs -mtriple=x86_64-unknown-unknown --show-mc-encoding -mattr=+raoint | FileCheck %s --check-prefixes=X64
-define void @test_int_x86_aadd64(i8* %A, i64 %B) {
+define void @test_int_x86_aadd64(ptr %A, i64 %B) {
; X64-LABEL: test_int_x86_aadd64:
; X64: # %bb.0:
; X64-NEXT: aaddq %rsi, (%rdi) # encoding: [0x48,0x0f,0x38,0xfc,0x37]
; X64-NEXT: retq # encoding: [0xc3]
- call void @llvm.x86.aadd64(i8* %A, i64 %B)
+ call void @llvm.x86.aadd64(ptr %A, i64 %B)
ret void
}
-declare void @llvm.x86.aadd64(i8* %A, i64 %B)
+declare void @llvm.x86.aadd64(ptr %A, i64 %B)
-define void @test_int_x86_aand64(i8* %A, i64 %B) {
+define void @test_int_x86_aand64(ptr %A, i64 %B) {
; X64-LABEL: test_int_x86_aand64:
; X64: # %bb.0:
; X64-NEXT: aandq %rsi, (%rdi) # encoding: [0x66,0x48,0x0f,0x38,0xfc,0x37]
; X64-NEXT: retq # encoding: [0xc3]
- call void @llvm.x86.aand64(i8* %A, i64 %B)
+ call void @llvm.x86.aand64(ptr %A, i64 %B)
ret void
}
-declare void @llvm.x86.aand64(i8* %A, i64 %B)
+declare void @llvm.x86.aand64(ptr %A, i64 %B)
-define void @test_int_x86_aor64(i8* %A, i64 %B) {
+define void @test_int_x86_aor64(ptr %A, i64 %B) {
; X64-LABEL: test_int_x86_aor64:
; X64: # %bb.0:
; X64-NEXT: aorq %rsi, (%rdi) # encoding: [0xf2,0x48,0x0f,0x38,0xfc,0x37]
; X64-NEXT: retq # encoding: [0xc3]
- call void @llvm.x86.aor64(i8* %A, i64 %B)
+ call void @llvm.x86.aor64(ptr %A, i64 %B)
ret void
}
-declare void @llvm.x86.aor64(i8* %A, i64 %B)
+declare void @llvm.x86.aor64(ptr %A, i64 %B)
-define void @test_int_x86_axor64(i8* %A, i64 %B) {
+define void @test_int_x86_axor64(ptr %A, i64 %B) {
; X64-LABEL: test_int_x86_axor64:
; X64: # %bb.0:
; X64-NEXT: axorq %rsi, (%rdi) # encoding: [0xf3,0x48,0x0f,0x38,0xfc,0x37]
; X64-NEXT: retq # encoding: [0xc3]
- call void @llvm.x86.axor64(i8* %A, i64 %B)
+ call void @llvm.x86.axor64(ptr %A, i64 %B)
ret void
}
-declare void @llvm.x86.axor64(i8* %A, i64 %B)
+declare void @llvm.x86.axor64(ptr %A, i64 %B)
diff --git a/llvm/test/CodeGen/X86/reassociate-add.ll b/llvm/test/CodeGen/X86/reassociate-add.ll
index 76f5535a3beb1..a0792e5ccf6df 100644
--- a/llvm/test/CodeGen/X86/reassociate-add.ll
+++ b/llvm/test/CodeGen/X86/reassociate-add.ll
@@ -7,7 +7,7 @@
; latency, v0 and v1 should be added first, and their sum is added to t2
; later.
-define void @add8(i8 %x0, i8 %x1, i8 %x2, i8* %p) {
+define void @add8(i8 %x0, i8 %x1, i8 %x2, ptr %p) {
; CHECK-LABEL: add8:
; CHECK: # %bb.0:
; CHECK-NEXT: orb $16, %dil
@@ -27,11 +27,11 @@ define void @add8(i8 %x0, i8 %x1, i8 %x2, i8* %p) {
%t2 = mul i8 %t1, 100
%t3 = add i8 %t2, %v1
%t4 = add i8 %t3, %v0
- store i8 %t4, i8* %p, align 4
+ store i8 %t4, ptr %p, align 4
ret void
}
-define void @add16(i16 %x0, i16 %x1, i16 %x2, i16* %p) {
+define void @add16(i16 %x0, i16 %x1, i16 %x2, ptr %p) {
; CHECK-LABEL: add16:
; CHECK: # %bb.0:
; CHECK-NEXT: orl $16, %edi
@@ -50,11 +50,11 @@ define void @add16(i16 %x0, i16 %x1, i16 %x2, i16* %p) {
%t2 = mul i16 %t1, 100
%t3 = add i16 %t2, %v1
%t4 = add i16 %t3, %v0
- store i16 %t4, i16* %p, align 4
+ store i16 %t4, ptr %p, align 4
ret void
}
-define void @add32(i32 %x0, i32 %x1, i32 %x2, i32* %p) {
+define void @add32(i32 %x0, i32 %x1, i32 %x2, ptr %p) {
; CHECK-LABEL: add32:
; CHECK: # %bb.0:
; CHECK-NEXT: orl $16, %edi
@@ -73,11 +73,11 @@ define void @add32(i32 %x0, i32 %x1, i32 %x2, i32* %p) {
%t2 = mul i32 %t1, 100
%t3 = add i32 %t2, %v1
%t4 = add i32 %t3, %v0
- store i32 %t4, i32* %p, align 4
+ store i32 %t4, ptr %p, align 4
ret void
}
-define void @add64(i64 %x0, i64 %x1, i64 %x2, i64* %p) {
+define void @add64(i64 %x0, i64 %x1, i64 %x2, ptr %p) {
; CHECK-LABEL: add64:
; CHECK: # %bb.0:
; CHECK-NEXT: orq $16, %rdi
@@ -96,12 +96,12 @@ define void @add64(i64 %x0, i64 %x1, i64 %x2, i64* %p) {
%t2 = mul i64 %t1, 100
%t3 = add i64 %t2, %v1
%t4 = add i64 %t3, %v0
- store i64 %t4, i64* %p, align 4
+ store i64 %t4, ptr %p, align 4
ret void
}
; Negative test. Original sequence has shorter latency, don't transform it.
-define void @add64_negative(i64 %x0, i64 %x1, i64 %x2, i64* %p) {
+define void @add64_negative(i64 %x0, i64 %x1, i64 %x2, ptr %p) {
; CHECK-LABEL: add64_negative:
; CHECK: # %bb.0:
; CHECK-NEXT: orq $16, %rdi
@@ -120,6 +120,6 @@ define void @add64_negative(i64 %x0, i64 %x1, i64 %x2, i64* %p) {
%t2 = mul i64 %t1, 100
%t3 = add i64 %v0, %v1
%t4 = add i64 %t3, %t2
- store i64 %t4, i64* %p, align 4
+ store i64 %t4, ptr %p, align 4
ret void
}
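
To make the file's header comment concrete: %t2 ends the longer or+mul
dependence chain, so pairing it with %v1 first serializes both adds behind
the multiply; adding the two already-available values first shortens the
critical path. Sketched on the i64 case (assuming %v0 and %v1 are ready
before the mul completes, as the comment states):

;   slow: %t3 = add i64 %t2, %v1   ; waits for the mul producing %t2
;         %t4 = add i64 %t3, %v0
;   fast: %t3 = add i64 %v0, %v1   ; independent of the mul
;         %t4 = add i64 %t3, %t2   ; one add remains on the critical path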
diff --git a/llvm/test/CodeGen/X86/selectiondag-stackmap-legalize.ll b/llvm/test/CodeGen/X86/selectiondag-stackmap-legalize.ll
index bc624be5318e7..8bb47a3146b54 100644
--- a/llvm/test/CodeGen/X86/selectiondag-stackmap-legalize.ll
+++ b/llvm/test/CodeGen/X86/selectiondag-stackmap-legalize.ll
@@ -109,19 +109,19 @@
; CHECK-NEXT: .short 0
; CHECK-NEXT: .long 0
-@p32 = external global i8 addrspace(270)*
+@p32 = external global ptr addrspace(270)
%struct1 = type {i32, i64}
%struct2 = type {i1, i1, i1}
declare void @llvm.experimental.stackmap(i64, i32, ...)
-define dso_local i32 @main(i32 %argc, i8** %argv) {
+define dso_local i32 @main(i32 %argc, ptr %argv) {
entry:
%i1reg = icmp eq i32 %argc, 5
%i7reg = zext i1 %i1reg to i7
%halfreg = sitofp i32 %argc to half
- %ptr32 = load i8 addrspace(270)*, i8 addrspace(270)** @p32
+ %ptr32 = load ptr addrspace(270), ptr @p32
%structreg1 = insertvalue %struct1 zeroinitializer, i32 %argc, 0
%structreg2 = insertvalue %struct2 zeroinitializer, i1 %i1reg, 0
call void (i64, i32, ...) @llvm.experimental.stackmap(
@@ -140,7 +140,7 @@ entry:
; FIXME: test non-constant i128 once these are fixed:
; - https://github.com/llvm/llvm-project/issues/26431
; - https://github.com/llvm/llvm-project/issues/55957
- i8 addrspace(270)* %ptr32,
+ ptr addrspace(270) %ptr32,
; FIXME: The stackmap record generated for structs is incorrect:
; - https://github.com/llvm/llvm-project/issues/55649
; - https://github.com/llvm/llvm-project/issues/55957
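
Note that opaque pointers erase only the pointee type; the address space is
still part of the pointer type, so the 32-bit addrspace(270) pointer this
test exercises stays distinct from a plain ptr. A minimal sketch reusing
@p32 (hypothetical function name):

define ptr addrspace(270) @load_p32() {
  %v = load ptr addrspace(270), ptr @p32
  ret ptr addrspace(270) %v
}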
diff --git a/llvm/test/CodeGen/X86/stack-protector-2.ll b/llvm/test/CodeGen/X86/stack-protector-2.ll
index f2fc64ab0c866..14909520f9149 100644
--- a/llvm/test/CodeGen/X86/stack-protector-2.ll
+++ b/llvm/test/CodeGen/X86/stack-protector-2.ll
@@ -4,20 +4,20 @@
define void @store_captures() #0 {
; CHECK-LABEL: @store_captures(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[STACKGUARDSLOT:%.*]] = alloca i8*
-; CHECK-NEXT: [[STACKGUARD:%.*]] = load volatile i8*, i8* addrspace(257)* inttoptr (i32 40 to i8* addrspace(257)*)
-; CHECK-NEXT: call void @llvm.stackprotector(i8* [[STACKGUARD]], i8** [[STACKGUARDSLOT]])
+; CHECK-NEXT: [[STACKGUARDSLOT:%.*]] = alloca ptr
+; CHECK-NEXT: [[STACKGUARD:%.*]] = load volatile ptr, ptr addrspace(257) inttoptr (i32 40 to ptr addrspace(257))
+; CHECK-NEXT: call void @llvm.stackprotector(ptr [[STACKGUARD]], ptr [[STACKGUARDSLOT]])
; CHECK-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT: [[J:%.*]] = alloca i32*, align 8
-; CHECK-NEXT: store i32 0, i32* [[RETVAL]]
-; CHECK-NEXT: [[LOAD:%.*]] = load i32, i32* [[A]], align 4
+; CHECK-NEXT: [[J:%.*]] = alloca ptr, align 8
+; CHECK-NEXT: store i32 0, ptr [[RETVAL]]
+; CHECK-NEXT: [[LOAD:%.*]] = load i32, ptr [[A]], align 4
; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[LOAD]], 1
-; CHECK-NEXT: store i32 [[ADD]], i32* [[A]], align 4
-; CHECK-NEXT: store i32* [[A]], i32** [[J]], align 8
-; CHECK-NEXT: [[STACKGUARD1:%.*]] = load volatile i8*, i8* addrspace(257)* inttoptr (i32 40 to i8* addrspace(257)*)
-; CHECK-NEXT: [[TMP0:%.*]] = load volatile i8*, i8** [[STACKGUARDSLOT]]
-; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i8* [[STACKGUARD1]], [[TMP0]]
+; CHECK-NEXT: store i32 [[ADD]], ptr [[A]], align 4
+; CHECK-NEXT: store ptr [[A]], ptr [[J]], align 8
+; CHECK-NEXT: [[STACKGUARD1:%.*]] = load volatile ptr, ptr addrspace(257) inttoptr (i32 40 to ptr addrspace(257))
+; CHECK-NEXT: [[TMP0:%.*]] = load volatile ptr, ptr [[STACKGUARDSLOT]]
+; CHECK-NEXT: [[TMP1:%.*]] = icmp eq ptr [[STACKGUARD1]], [[TMP0]]
; CHECK-NEXT: br i1 [[TMP1]], label [[SP_RETURN:%.*]], label [[CALLSTACKCHECKFAILBLK:%.*]], !prof !0
; CHECK: SP_return:
; CHECK-NEXT: ret void
@@ -28,49 +28,49 @@ define void @store_captures() #0 {
entry:
%retval = alloca i32, align 4
%a = alloca i32, align 4
- %j = alloca i32*, align 8
- store i32 0, i32* %retval
- %load = load i32, i32* %a, align 4
+ %j = alloca ptr, align 8
+ store i32 0, ptr %retval
+ %load = load i32, ptr %a, align 4
%add = add nsw i32 %load, 1
- store i32 %add, i32* %a, align 4
- store i32* %a, i32** %j, align 8
+ store i32 %add, ptr %a, align 4
+ store ptr %a, ptr %j, align 8
ret void
}
-define i32* @non_captures() #0 {
+define ptr @non_captures() #0 {
; load, atomicrmw, and ret do not trigger a stack protector.
; CHECK-LABEL: @non_captures(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT: [[LOAD:%.*]] = load i32, i32* [[A]], align 4
-; CHECK-NEXT: [[ATOM:%.*]] = atomicrmw add i32* [[A]], i32 1 seq_cst
-; CHECK-NEXT: ret i32* [[A]]
+; CHECK-NEXT: [[LOAD:%.*]] = load i32, ptr [[A]], align 4
+; CHECK-NEXT: [[ATOM:%.*]] = atomicrmw add ptr [[A]], i32 1 seq_cst
+; CHECK-NEXT: ret ptr [[A]]
;
entry:
%a = alloca i32, align 4
- %load = load i32, i32* %a, align 4
- %atom = atomicrmw add i32* %a, i32 1 seq_cst
- ret i32* %a
+ %load = load i32, ptr %a, align 4
+ %atom = atomicrmw add ptr %a, i32 1 seq_cst
+ ret ptr %a
}
define void @store_addrspacecast_captures() #0 {
; CHECK-LABEL: @store_addrspacecast_captures(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[STACKGUARDSLOT:%.*]] = alloca i8*
-; CHECK-NEXT: [[STACKGUARD:%.*]] = load volatile i8*, i8* addrspace(257)* inttoptr (i32 40 to i8* addrspace(257)*)
-; CHECK-NEXT: call void @llvm.stackprotector(i8* [[STACKGUARD]], i8** [[STACKGUARDSLOT]])
+; CHECK-NEXT: [[STACKGUARDSLOT:%.*]] = alloca ptr
+; CHECK-NEXT: [[STACKGUARD:%.*]] = load volatile ptr, ptr addrspace(257) inttoptr (i32 40 to ptr addrspace(257))
+; CHECK-NEXT: call void @llvm.stackprotector(ptr [[STACKGUARD]], ptr [[STACKGUARDSLOT]])
; CHECK-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT: [[J:%.*]] = alloca i32 addrspace(1)*, align 8
-; CHECK-NEXT: store i32 0, i32* [[RETVAL]]
-; CHECK-NEXT: [[LOAD:%.*]] = load i32, i32* [[A]], align 4
+; CHECK-NEXT: [[J:%.*]] = alloca ptr addrspace(1), align 8
+; CHECK-NEXT: store i32 0, ptr [[RETVAL]]
+; CHECK-NEXT: [[LOAD:%.*]] = load i32, ptr [[A]], align 4
; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[LOAD]], 1
-; CHECK-NEXT: store i32 [[ADD]], i32* [[A]], align 4
-; CHECK-NEXT: [[A_ADDRSPACECAST:%.*]] = addrspacecast i32* [[A]] to i32 addrspace(1)*
-; CHECK-NEXT: store i32 addrspace(1)* [[A_ADDRSPACECAST]], i32 addrspace(1)** [[J]], align 8
-; CHECK-NEXT: [[STACKGUARD1:%.*]] = load volatile i8*, i8* addrspace(257)* inttoptr (i32 40 to i8* addrspace(257)*)
-; CHECK-NEXT: [[TMP0:%.*]] = load volatile i8*, i8** [[STACKGUARDSLOT]]
-; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i8* [[STACKGUARD1]], [[TMP0]]
+; CHECK-NEXT: store i32 [[ADD]], ptr [[A]], align 4
+; CHECK-NEXT: [[A_ADDRSPACECAST:%.*]] = addrspacecast ptr [[A]] to ptr addrspace(1)
+; CHECK-NEXT: store ptr addrspace(1) [[A_ADDRSPACECAST]], ptr [[J]], align 8
+; CHECK-NEXT: [[STACKGUARD1:%.*]] = load volatile ptr, ptr addrspace(257) inttoptr (i32 40 to ptr addrspace(257))
+; CHECK-NEXT: [[TMP0:%.*]] = load volatile ptr, ptr [[STACKGUARDSLOT]]
+; CHECK-NEXT: [[TMP1:%.*]] = icmp eq ptr [[STACKGUARD1]], [[TMP0]]
; CHECK-NEXT: br i1 [[TMP1]], label [[SP_RETURN:%.*]], label [[CALLSTACKCHECKFAILBLK:%.*]], !prof !0
; CHECK: SP_return:
; CHECK-NEXT: ret void
@@ -81,33 +81,33 @@ define void @store_addrspacecast_captures() #0 {
entry:
%retval = alloca i32, align 4
%a = alloca i32, align 4
- %j = alloca i32 addrspace(1)*, align 8
- store i32 0, i32* %retval
- %load = load i32, i32* %a, align 4
+ %j = alloca ptr addrspace(1), align 8
+ store i32 0, ptr %retval
+ %load = load i32, ptr %a, align 4
%add = add nsw i32 %load, 1
- store i32 %add, i32* %a, align 4
- %a.addrspacecast = addrspacecast i32* %a to i32 addrspace(1)*
- store i32 addrspace(1)* %a.addrspacecast, i32 addrspace(1)** %j, align 8
+ store i32 %add, ptr %a, align 4
+ %a.addrspacecast = addrspacecast ptr %a to ptr addrspace(1)
+ store ptr addrspace(1) %a.addrspacecast, ptr %j, align 8
ret void
}
define void @cmpxchg_captures() #0 {
; CHECK-LABEL: @cmpxchg_captures(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[STACKGUARDSLOT:%.*]] = alloca i8*
-; CHECK-NEXT: [[STACKGUARD:%.*]] = load volatile i8*, i8* addrspace(257)* inttoptr (i32 40 to i8* addrspace(257)*)
-; CHECK-NEXT: call void @llvm.stackprotector(i8* [[STACKGUARD]], i8** [[STACKGUARDSLOT]])
+; CHECK-NEXT: [[STACKGUARDSLOT:%.*]] = alloca ptr
+; CHECK-NEXT: [[STACKGUARD:%.*]] = load volatile ptr, ptr addrspace(257) inttoptr (i32 40 to ptr addrspace(257))
+; CHECK-NEXT: call void @llvm.stackprotector(ptr [[STACKGUARD]], ptr [[STACKGUARDSLOT]])
; CHECK-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT: [[J:%.*]] = alloca i32*, align 8
-; CHECK-NEXT: store i32 0, i32* [[RETVAL]]
-; CHECK-NEXT: [[LOAD:%.*]] = load i32, i32* [[A]], align 4
+; CHECK-NEXT: [[J:%.*]] = alloca ptr, align 8
+; CHECK-NEXT: store i32 0, ptr [[RETVAL]]
+; CHECK-NEXT: [[LOAD:%.*]] = load i32, ptr [[A]], align 4
; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[LOAD]], 1
-; CHECK-NEXT: store i32 [[ADD]], i32* [[A]], align 4
-; CHECK-NEXT: [[TMP0:%.*]] = cmpxchg i32** [[J]], i32* null, i32* [[A]] seq_cst monotonic
-; CHECK-NEXT: [[STACKGUARD1:%.*]] = load volatile i8*, i8* addrspace(257)* inttoptr (i32 40 to i8* addrspace(257)*)
-; CHECK-NEXT: [[TMP1:%.*]] = load volatile i8*, i8** [[STACKGUARDSLOT]]
-; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i8* [[STACKGUARD1]], [[TMP1]]
+; CHECK-NEXT: store i32 [[ADD]], ptr [[A]], align 4
+; CHECK-NEXT: [[TMP0:%.*]] = cmpxchg ptr [[J]], ptr null, ptr [[A]] seq_cst monotonic
+; CHECK-NEXT: [[STACKGUARD1:%.*]] = load volatile ptr, ptr addrspace(257) inttoptr (i32 40 to ptr addrspace(257))
+; CHECK-NEXT: [[TMP1:%.*]] = load volatile ptr, ptr [[STACKGUARDSLOT]]
+; CHECK-NEXT: [[TMP2:%.*]] = icmp eq ptr [[STACKGUARD1]], [[TMP1]]
; CHECK-NEXT: br i1 [[TMP2]], label [[SP_RETURN:%.*]], label [[CALLSTACKCHECKFAILBLK:%.*]], !prof !0
; CHECK: SP_return:
; CHECK-NEXT: ret void
@@ -118,31 +118,30 @@ define void @cmpxchg_captures() #0 {
entry:
%retval = alloca i32, align 4
%a = alloca i32, align 4
- %j = alloca i32*, align 8
- store i32 0, i32* %retval
- %load = load i32, i32* %a, align 4
+ %j = alloca ptr, align 8
+ store i32 0, ptr %retval
+ %load = load i32, ptr %a, align 4
%add = add nsw i32 %load, 1
- store i32 %add, i32* %a, align 4
+ store i32 %add, ptr %a, align 4
- cmpxchg i32** %j, i32* null, i32* %a seq_cst monotonic
+ cmpxchg ptr %j, ptr null, ptr %a seq_cst monotonic
ret void
}
define void @memset_captures(i64 %c) #0 {
; CHECK-LABEL: @memset_captures(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[STACKGUARDSLOT:%.*]] = alloca i8*
-; CHECK-NEXT: [[STACKGUARD:%.*]] = load volatile i8*, i8* addrspace(257)* inttoptr (i32 40 to i8* addrspace(257)*)
-; CHECK-NEXT: call void @llvm.stackprotector(i8* [[STACKGUARD]], i8** [[STACKGUARDSLOT]])
+; CHECK-NEXT: [[STACKGUARDSLOT:%.*]] = alloca ptr
+; CHECK-NEXT: [[STACKGUARD:%.*]] = load volatile ptr, ptr addrspace(257) inttoptr (i32 40 to ptr addrspace(257))
+; CHECK-NEXT: call void @llvm.stackprotector(ptr [[STACKGUARD]], ptr [[STACKGUARDSLOT]])
; CHECK-NEXT: [[CADDR:%.*]] = alloca i64, align 8
-; CHECK-NEXT: store i64 %c, i64* [[CADDR]], align 8
+; CHECK-NEXT: store i64 %c, ptr [[CADDR]], align 8
; CHECK-NEXT: [[I:%.*]] = alloca i32, align 4
-; CHECK-NEXT: [[IPTR:%.*]] = bitcast i32* [[I]] to i8*
-; CHECK-NEXT: [[COUNT:%.*]] = load i64, i64* [[CADDR]], align 8
-; CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* align 4 [[IPTR]], i8 0, i64 [[COUNT]], i1 false)
-; CHECK-NEXT: [[STACKGUARD1:%.*]] = load volatile i8*, i8* addrspace(257)* inttoptr (i32 40 to i8* addrspace(257)*)
-; CHECK-NEXT: [[TMP1:%.*]] = load volatile i8*, i8** [[STACKGUARDSLOT]]
-; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i8* [[STACKGUARD1]], [[TMP1]]
+; CHECK-NEXT: [[COUNT:%.*]] = load i64, ptr [[CADDR]], align 8
+; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 4 [[I]], i8 0, i64 [[COUNT]], i1 false)
+; CHECK-NEXT: [[STACKGUARD1:%.*]] = load volatile ptr, ptr addrspace(257) inttoptr (i32 40 to ptr addrspace(257))
+; CHECK-NEXT: [[TMP1:%.*]] = load volatile ptr, ptr [[STACKGUARDSLOT]]
+; CHECK-NEXT: [[TMP2:%.*]] = icmp eq ptr [[STACKGUARD1]], [[TMP1]]
; CHECK-NEXT: br i1 [[TMP2]], label [[SP_RETURN:%.*]], label [[CALLSTACKCHECKFAILBLK:%.*]], !prof !0
; CHECK: SP_return:
; CHECK-NEXT: ret void
@@ -152,18 +151,17 @@ define void @memset_captures(i64 %c) #0 {
;
entry:
%c.addr = alloca i64, align 8
- store i64 %c, i64* %c.addr, align 8
+ store i64 %c, ptr %c.addr, align 8
%i = alloca i32, align 4
- %i.ptr = bitcast i32* %i to i8*
- %count = load i64, i64* %c.addr, align 8
- call void @llvm.memset.p0i8.i64(i8* align 4 %i.ptr, i8 0, i64 %count, i1 false)
+ %count = load i64, ptr %c.addr, align 8
+ call void @llvm.memset.p0.i64(ptr align 4 %i, i8 0, i64 %count, i1 false)
ret void
}
-declare void @llvm.memset.p0i8.i64(i8* nocapture writeonly, i8, i64, i1 immarg)
+declare void @llvm.memset.p0.i64(ptr nocapture writeonly, i8, i64, i1 immarg)
; Intentionally does not have any fn attrs.
-declare dso_local void @foo(i8*)
+declare dso_local void @foo(ptr)
; @bar_sspstrong and @bar_nossp are the same function, but differ only in
; function attributes. Test that a callee without stack protector function
@@ -171,12 +169,12 @@ declare dso_local void @foo(i8*)
; have a stack protector slot.
define dso_local void @bar_sspstrong(i64 %0) #0 {
; CHECK-LABEL: @bar_sspstrong
-; CHECK-NEXT: %StackGuardSlot = alloca i8*
+; CHECK-NEXT: %StackGuardSlot = alloca ptr
%2 = alloca i64, align 8
- store i64 %0, i64* %2, align 8
- %3 = load i64, i64* %2, align 8
+ store i64 %0, ptr %2, align 8
+ %3 = load i64, ptr %2, align 8
%4 = alloca i8, i64 %3, align 16
- call void @foo(i8* %4)
+ call void @foo(ptr %4)
ret void
}
@@ -185,10 +183,10 @@ define dso_local void @bar_nossp(i64 %0) {
; CHECK-LABEL: @bar_nossp
; CHECK-NEXT: %2 = alloca i64
%2 = alloca i64, align 8
- store i64 %0, i64* %2, align 8
- %3 = load i64, i64* %2, align 8
+ store i64 %0, ptr %2, align 8
+ %3 = load i64, ptr %2, align 8
%4 = alloca i8, i64 %3, align 16
- call void @foo(i8* %4)
+ call void @foo(ptr %4)
ret void
}
@@ -200,9 +198,9 @@ entry:
br i1 %cmp, label %if.then, label %if.end
; CHECK: if.then: ; preds = %entry
-; CHECK-NEXT: %StackGuard1 = load volatile i8*, i8* addrspace(257)* inttoptr (i32 40 to i8* addrspace(257)*), align 8
-; CHECK-NEXT: %1 = load volatile i8*, i8** %StackGuardSlot, align 8
-; CHECK-NEXT: %2 = icmp eq i8* %StackGuard1, %1
+; CHECK-NEXT: %StackGuard1 = load volatile ptr, ptr addrspace(257) inttoptr (i32 40 to ptr addrspace(257)), align 8
+; CHECK-NEXT: %1 = load volatile ptr, ptr %StackGuardSlot, align 8
+; CHECK-NEXT: %2 = icmp eq ptr %StackGuard1, %1
; CHECK-NEXT: br i1 %2, label %SP_return, label %CallStackCheckFailBlk
; CHECK: SP_return: ; preds = %if.then
; CHECK-NEXT: %call = call i32 @foo_no_return(i32 1)
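
A note on the recurring inttoptr (i32 40 to ptr addrspace(257)) constant:
on x86-64, address space 257 denotes %fs-relative addressing, so this is
the guard word at %fs:0x28 that the TLS control block provides. The pass
saves it in the prologue and re-checks it before return; condensed, the
epilogue shape the CHECK lines verify is:

;   %g1 = load volatile ptr, ptr addrspace(257) inttoptr (i32 40 to ptr addrspace(257))
;   %g2 = load volatile ptr, ptr %StackGuardSlot
;   %ok = icmp eq ptr %g1, %g2
;   br i1 %ok, label %SP_return, label %CallStackCheckFailBlk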
diff --git a/llvm/test/CodeGen/X86/stack-protector-musttail.ll b/llvm/test/CodeGen/X86/stack-protector-musttail.ll
index aa6945af7c0e4..b0849ac8daa49 100644
--- a/llvm/test/CodeGen/X86/stack-protector-musttail.ll
+++ b/llvm/test/CodeGen/X86/stack-protector-musttail.ll
@@ -1,6 +1,6 @@
; RUN: llc -mtriple=x86_64-linux-gnu -fast-isel %s -o - -start-before=stack-protector -stop-after=stack-protector | FileCheck %s
-@var = global [2 x i64]* null
+@var = global ptr null
declare void @callee()
@@ -9,15 +9,15 @@ define void @caller1() sspreq {
; Prologue:
; CHECK: @llvm.stackprotector
-; CHECK: [[GUARD:%.*]] = load volatile i8*, i8*
-; CHECK: [[TOKEN:%.*]] = load volatile i8*, i8** {{%.*}}
-; CHECK: [[TST:%.*]] = icmp eq i8* [[GUARD]], [[TOKEN]]
+; CHECK: [[GUARD:%.*]] = load volatile ptr, ptr
+; CHECK: [[TOKEN:%.*]] = load volatile ptr, ptr {{%.*}}
+; CHECK: [[TST:%.*]] = icmp eq ptr [[GUARD]], [[TOKEN]]
; CHECK: br i1 [[TST]]
; CHECK: musttail call void @callee()
; CHECK-NEXT: ret void
%var = alloca [2 x i64]
- store [2 x i64]* %var, [2 x i64]** @var
+ store ptr %var, ptr @var
musttail call void @callee()
ret void
}
@@ -27,14 +27,14 @@ define void @justret() sspreq {
; Prologue:
; CHECK: @llvm.stackprotector
-; CHECK: [[GUARD:%.*]] = load volatile i8*, i8*
-; CHECK: [[TOKEN:%.*]] = load volatile i8*, i8** {{%.*}}
-; CHECK: [[TST:%.*]] = icmp eq i8* [[GUARD]], [[TOKEN]]
+; CHECK: [[GUARD:%.*]] = load volatile ptr, ptr
+; CHECK: [[TOKEN:%.*]] = load volatile ptr, ptr {{%.*}}
+; CHECK: [[TST:%.*]] = icmp eq ptr [[GUARD]], [[TOKEN]]
; CHECK: br i1 [[TST]]
; CHECK: ret void
%var = alloca [2 x i64]
- store [2 x i64]* %var, [2 x i64]** @var
+ store ptr %var, ptr @var
br label %retblock
retblock:
@@ -42,27 +42,25 @@ retblock:
}
-declare i64* @callee2()
+declare ptr @callee2()
-define i8* @caller2() sspreq {
-; CHECK-LABEL: define i8* @caller2()
+define ptr @caller2() sspreq {
+; CHECK-LABEL: define ptr @caller2()
; Prologue:
; CHECK: @llvm.stackprotector
-; CHECK: [[GUARD:%.*]] = load volatile i8*, i8*
-; CHECK: [[TOKEN:%.*]] = load volatile i8*, i8** {{%.*}}
-; CHECK: [[TST:%.*]] = icmp eq i8* [[GUARD]], [[TOKEN]]
+; CHECK: [[GUARD:%.*]] = load volatile ptr, ptr
+; CHECK: [[TOKEN:%.*]] = load volatile ptr, ptr {{%.*}}
+; CHECK: [[TST:%.*]] = icmp eq ptr [[GUARD]], [[TOKEN]]
; CHECK: br i1 [[TST]]
-; CHECK: [[TMP:%.*]] = musttail call i64* @callee2()
-; CHECK-NEXT: [[RES:%.*]] = bitcast i64* [[TMP]] to i8*
-; CHECK-NEXT: ret i8* [[RES]]
+; CHECK: [[TMP:%.*]] = musttail call ptr @callee2()
+; CHECK-NEXT: ret ptr [[TMP]]
%var = alloca [2 x i64]
- store [2 x i64]* %var, [2 x i64]** @var
- %tmp = musttail call i64* @callee2()
- %res = bitcast i64* %tmp to i8*
- ret i8* %res
+ store ptr %var, ptr @var
+ %tmp = musttail call ptr @callee2()
+ ret ptr %tmp
}
define void @caller3() sspreq {
@@ -70,36 +68,34 @@ define void @caller3() sspreq {
; Prologue:
; CHECK: @llvm.stackprotector
-; CHECK: [[GUARD:%.*]] = load volatile i8*, i8*
-; CHECK: [[TOKEN:%.*]] = load volatile i8*, i8** {{%.*}}
-; CHECK: [[TST:%.*]] = icmp eq i8* [[GUARD]], [[TOKEN]]
+; CHECK: [[GUARD:%.*]] = load volatile ptr, ptr
+; CHECK: [[TOKEN:%.*]] = load volatile ptr, ptr {{%.*}}
+; CHECK: [[TST:%.*]] = icmp eq ptr [[GUARD]], [[TOKEN]]
; CHECK: br i1 [[TST]]
; CHECK: tail call void @callee()
; CHECK-NEXT: ret void
%var = alloca [2 x i64]
- store [2 x i64]* %var, [2 x i64]** @var
+ store ptr %var, ptr @var
tail call void @callee()
ret void
}
-define i8* @caller4() sspreq {
-; CHECK-LABEL: define i8* @caller4()
+define ptr @caller4() sspreq {
+; CHECK-LABEL: define ptr @caller4()
; Prologue:
; CHECK: @llvm.stackprotector
-; CHECK: [[GUARD:%.*]] = load volatile i8*, i8*
-; CHECK: [[TOKEN:%.*]] = load volatile i8*, i8** {{%.*}}
-; CHECK: [[TST:%.*]] = icmp eq i8* [[GUARD]], [[TOKEN]]
+; CHECK: [[GUARD:%.*]] = load volatile ptr, ptr
+; CHECK: [[TOKEN:%.*]] = load volatile ptr, ptr {{%.*}}
+; CHECK: [[TST:%.*]] = icmp eq ptr [[GUARD]], [[TOKEN]]
; CHECK: br i1 [[TST]]
-; CHECK: [[TMP:%.*]] = tail call i64* @callee2()
-; CHECK-NEXT: [[RES:%.*]] = bitcast i64* [[TMP]] to i8*
-; CHECK-NEXT: ret i8* [[RES]]
+; CHECK: [[TMP:%.*]] = tail call ptr @callee2()
+; CHECK-NEXT: ret ptr [[TMP]]
%var = alloca [2 x i64]
- store [2 x i64]* %var, [2 x i64]** @var
- %tmp = tail call i64* @callee2()
- %res = bitcast i64* %tmp to i8*
- ret i8* %res
+ store ptr %var, ptr @var
+ %tmp = tail call ptr @callee2()
+ ret ptr %tmp
}
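
caller2 and caller4 also show why the conversion helps tail calls: the
result of a musttail call must flow to ret unmodified (a pointer bitcast
used to be the one permitted exception), and with the single ptr type the
cast vanishes. Side by side:

; typed:  %tmp = musttail call i64* @callee2()
;         %res = bitcast i64* %tmp to i8*
;         ret i8* %res
; opaque: %tmp = musttail call ptr @callee2()
;         ret ptr %tmp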
diff --git a/llvm/test/CodeGen/X86/stackmap-dynamic-alloca.ll b/llvm/test/CodeGen/X86/stackmap-dynamic-alloca.ll
index 784116cda4a96..46ec58c50cb1e 100644
--- a/llvm/test/CodeGen/X86/stackmap-dynamic-alloca.ll
+++ b/llvm/test/CodeGen/X86/stackmap-dynamic-alloca.ll
@@ -23,7 +23,7 @@
define void @f(i32 %nelems) {
entry:
%mem = alloca i32, i32 %nelems
- call void (i64, i32, ...) @llvm.experimental.stackmap(i64 0, i32 0, i32* %mem)
+ call void (i64, i32, ...) @llvm.experimental.stackmap(i64 0, i32 0, ptr %mem)
ret void
}