[llvm] c93e7de - [InstCombine] Convert some tests to opaque pointers (NFC)
Nikita Popov via llvm-commits
llvm-commits at lists.llvm.org
Tue Dec 27 07:31:29 PST 2022
Author: Nikita Popov
Date: 2022-12-27T16:29:52+01:00
New Revision: c93e7dec7e8075e717528555e05991d2e75a0648
URL: https://github.com/llvm/llvm-project/commit/c93e7dec7e8075e717528555e05991d2e75a0648
DIFF: https://github.com/llvm/llvm-project/commit/c93e7dec7e8075e717528555e05991d2e75a0648.diff
LOG: [InstCombine] Convert some tests to opaque pointers (NFC)
Check lines for these were regenerated, but without any
significant changes (mostly different GEP source element types).
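For context, a minimal, illustrative sketch of what such a conversion looks like (the @load_first function below is a made-up example, not taken from this patch): typed pointer types such as i32* are replaced with the opaque ptr type, pointer bitcasts become unnecessary and are dropped, and getelementptr/load/store keep an explicit element or value type, which is why the regenerated CHECK lines mostly differ in GEP source element types.

; Before, with typed pointers (hypothetical example, not from this commit):
define i32 @load_first(i32* %p) {
  %v = load i32, i32* %p
  ret i32 %v
}

; After, with opaque pointers:
define i32 @load_first(ptr %p) {
  %v = load i32, ptr %p
  ret i32 %v
}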
Added:
Modified:
llvm/test/Transforms/InstCombine/2008-04-22-ByValBitcast.ll
llvm/test/Transforms/InstCombine/compare-alloca.ll
llvm/test/Transforms/InstCombine/compare-unescaped.ll
llvm/test/Transforms/InstCombine/gepphigep.ll
llvm/test/Transforms/InstCombine/memchr-8.ll
llvm/test/Transforms/InstCombine/memcpy-addrspace.ll
llvm/test/Transforms/InstCombine/memrchr-4.ll
llvm/test/Transforms/InstCombine/select.ll
llvm/test/Transforms/InstCombine/sink_sideeffecting_instruction.ll
llvm/test/Transforms/InstCombine/stpcpy-1.ll
llvm/test/Transforms/InstCombine/stpcpy_chk-1.ll
Removed:
################################################################################
diff --git a/llvm/test/Transforms/InstCombine/2008-04-22-ByValBitcast.ll b/llvm/test/Transforms/InstCombine/2008-04-22-ByValBitcast.ll
index 15ccce5ebc65..8433761376dd 100644
--- a/llvm/test/Transforms/InstCombine/2008-04-22-ByValBitcast.ll
+++ b/llvm/test/Transforms/InstCombine/2008-04-22-ByValBitcast.ll
@@ -7,16 +7,14 @@ target triple = "i386-apple-darwin9"
%struct.NSRect = type { [4 x float] }
-define void @foo(i8* %context) nounwind {
+define void @foo(ptr %context) nounwind {
; CHECK-LABEL: @foo(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP1:%.*]] = bitcast i8* [[CONTEXT:%.*]] to %struct.NSRect*
-; CHECK-NEXT: call void (i32, ...) @bar(i32 3, %struct.NSRect* byval([[STRUCT_NSRECT:%.*]]) align 4 [[TMP1]]) #[[ATTR0:[0-9]+]]
+; CHECK-NEXT: call void (i32, ...) @bar(i32 3, ptr byval([[STRUCT_NSRECT:%.*]]) align 4 [[CONTEXT:%.*]]) #[[ATTR0:[0-9]+]]
; CHECK-NEXT: ret void
;
entry:
- %tmp1 = bitcast i8* %context to %struct.NSRect* ; <%struct.NSRect*> [#uses=1]
- call void (i32, ...) @bar( i32 3, %struct.NSRect* byval(%struct.NSRect) align 4 %tmp1 ) nounwind
+ call void (i32, ...) @bar( i32 3, ptr byval(%struct.NSRect) align 4 %context ) nounwind
ret void
}
diff --git a/llvm/test/Transforms/InstCombine/compare-alloca.ll b/llvm/test/Transforms/InstCombine/compare-alloca.ll
index 6201b346126d..6bea1adc1405 100644
--- a/llvm/test/Transforms/InstCombine/compare-alloca.ll
+++ b/llvm/test/Transforms/InstCombine/compare-alloca.ll
@@ -3,119 +3,118 @@
target datalayout = "p:32:32"
-define i1 @alloca_argument_compare(i64* %arg) {
+define i1 @alloca_argument_compare(ptr %arg) {
; CHECK-LABEL: @alloca_argument_compare(
; CHECK-NEXT: ret i1 false
;
%alloc = alloca i64
- %cmp = icmp eq i64* %arg, %alloc
+ %cmp = icmp eq ptr %arg, %alloc
ret i1 %cmp
}
-define i1 @alloca_argument_compare_swapped(i64* %arg) {
+define i1 @alloca_argument_compare_swapped(ptr %arg) {
; CHECK-LABEL: @alloca_argument_compare_swapped(
; CHECK-NEXT: ret i1 false
;
%alloc = alloca i64
- %cmp = icmp eq i64* %alloc, %arg
+ %cmp = icmp eq ptr %alloc, %arg
ret i1 %cmp
}
-define i1 @alloca_argument_compare_ne(i64* %arg) {
+define i1 @alloca_argument_compare_ne(ptr %arg) {
; CHECK-LABEL: @alloca_argument_compare_ne(
; CHECK-NEXT: ret i1 true
;
%alloc = alloca i64
- %cmp = icmp ne i64* %arg, %alloc
+ %cmp = icmp ne ptr %arg, %alloc
ret i1 %cmp
}
-define i1 @alloca_argument_compare_derived_ptrs(i64* %arg, i64 %x) {
+define i1 @alloca_argument_compare_derived_ptrs(ptr %arg, i64 %x) {
; CHECK-LABEL: @alloca_argument_compare_derived_ptrs(
; CHECK-NEXT: ret i1 false
;
%alloc = alloca i64, i64 8
- %p = getelementptr i64, i64* %arg, i64 %x
- %q = getelementptr i64, i64* %alloc, i64 3
- %cmp = icmp eq i64* %p, %q
+ %p = getelementptr i64, ptr %arg, i64 %x
+ %q = getelementptr i64, ptr %alloc, i64 3
+ %cmp = icmp eq ptr %p, %q
ret i1 %cmp
}
-declare void @escape(i64*)
-define i1 @alloca_argument_compare_escaped_alloca(i64* %arg) {
+declare void @escape(ptr)
+define i1 @alloca_argument_compare_escaped_alloca(ptr %arg) {
; CHECK-LABEL: @alloca_argument_compare_escaped_alloca(
; CHECK-NEXT: [[ALLOC:%.*]] = alloca i64, align 8
-; CHECK-NEXT: call void @escape(i64* nonnull [[ALLOC]])
-; CHECK-NEXT: [[CMP:%.*]] = icmp eq i64* [[ALLOC]], [[ARG:%.*]]
+; CHECK-NEXT: call void @escape(ptr nonnull [[ALLOC]])
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr [[ALLOC]], [[ARG:%.*]]
; CHECK-NEXT: ret i1 [[CMP]]
;
%alloc = alloca i64
- call void @escape(i64* %alloc)
- %cmp = icmp eq i64* %alloc, %arg
+ call void @escape(ptr %alloc)
+ %cmp = icmp eq ptr %alloc, %arg
ret i1 %cmp
}
declare void @check_compares(i1, i1)
-define void @alloca_argument_compare_two_compares(i64* %p) {
+define void @alloca_argument_compare_two_compares(ptr %p) {
; CHECK-LABEL: @alloca_argument_compare_two_compares(
; CHECK-NEXT: [[Q1:%.*]] = alloca [8 x i64], align 8
-; CHECK-NEXT: [[Q1_SUB:%.*]] = getelementptr inbounds [8 x i64], [8 x i64]* [[Q1]], i32 0, i32 0
-; CHECK-NEXT: [[R:%.*]] = getelementptr i64, i64* [[P:%.*]], i32 1
-; CHECK-NEXT: [[S:%.*]] = getelementptr inbounds [8 x i64], [8 x i64]* [[Q1]], i32 0, i32 2
-; CHECK-NEXT: [[CMP1:%.*]] = icmp eq i64* [[Q1_SUB]], [[P]]
-; CHECK-NEXT: [[CMP2:%.*]] = icmp eq i64* [[R]], [[S]]
+; CHECK-NEXT: [[R:%.*]] = getelementptr i64, ptr [[P:%.*]], i32 1
+; CHECK-NEXT: [[S:%.*]] = getelementptr inbounds i64, ptr [[Q1]], i32 2
+; CHECK-NEXT: [[CMP1:%.*]] = icmp eq ptr [[Q1]], [[P]]
+; CHECK-NEXT: [[CMP2:%.*]] = icmp eq ptr [[R]], [[S]]
; CHECK-NEXT: call void @check_compares(i1 [[CMP1]], i1 [[CMP2]])
; CHECK-NEXT: ret void
;
%q = alloca i64, i64 8
- %r = getelementptr i64, i64* %p, i64 1
- %s = getelementptr i64, i64* %q, i64 2
- %cmp1 = icmp eq i64* %p, %q
- %cmp2 = icmp eq i64* %r, %s
+ %r = getelementptr i64, ptr %p, i64 1
+ %s = getelementptr i64, ptr %q, i64 2
+ %cmp1 = icmp eq ptr %p, %q
+ %cmp2 = icmp eq ptr %r, %s
call void @check_compares(i1 %cmp1, i1 %cmp2)
ret void
; We will only fold if there is a single cmp.
}
-define i1 @alloca_argument_compare_escaped_through_store(i64* %arg, i64** %ptr) {
+define i1 @alloca_argument_compare_escaped_through_store(ptr %arg, ptr %ptr) {
; CHECK-LABEL: @alloca_argument_compare_escaped_through_store(
; CHECK-NEXT: [[ALLOC:%.*]] = alloca i64, align 8
-; CHECK-NEXT: [[CMP:%.*]] = icmp eq i64* [[ALLOC]], [[ARG:%.*]]
-; CHECK-NEXT: [[P:%.*]] = getelementptr inbounds i64, i64* [[ALLOC]], i32 1
-; CHECK-NEXT: store i64* [[P]], i64** [[PTR:%.*]], align 4
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr [[ALLOC]], [[ARG:%.*]]
+; CHECK-NEXT: [[P:%.*]] = getelementptr inbounds i64, ptr [[ALLOC]], i32 1
+; CHECK-NEXT: store ptr [[P]], ptr [[PTR:%.*]], align 4
; CHECK-NEXT: ret i1 [[CMP]]
;
%alloc = alloca i64
- %cmp = icmp eq i64* %alloc, %arg
- %p = getelementptr i64, i64* %alloc, i64 1
- store i64* %p, i64** %ptr
+ %cmp = icmp eq ptr %alloc, %arg
+ %p = getelementptr i64, ptr %alloc, i64 1
+ store ptr %p, ptr %ptr
ret i1 %cmp
}
-declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture)
-declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture)
-define i1 @alloca_argument_compare_benign_instrs(i8* %arg) {
+declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+define i1 @alloca_argument_compare_benign_instrs(ptr %arg) {
; CHECK-LABEL: @alloca_argument_compare_benign_instrs(
; CHECK-NEXT: ret i1 false
;
%alloc = alloca i8
- call void @llvm.lifetime.start.p0i8(i64 1, i8* %alloc)
- %cmp = icmp eq i8* %arg, %alloc
- %x = load i8, i8* %arg
- store i8 %x, i8* %alloc
- call void @llvm.lifetime.end.p0i8(i64 1, i8* %alloc)
+ call void @llvm.lifetime.start.p0(i64 1, ptr %alloc)
+ %cmp = icmp eq ptr %arg, %alloc
+ %x = load i8, ptr %arg
+ store i8 %x, ptr %alloc
+ call void @llvm.lifetime.end.p0(i64 1, ptr %alloc)
ret i1 %cmp
}
-declare i64* @allocator()
+declare ptr @allocator()
define i1 @alloca_call_compare() {
; CHECK-LABEL: @alloca_call_compare(
-; CHECK-NEXT: [[Q:%.*]] = call i64* @allocator()
+; CHECK-NEXT: [[Q:%.*]] = call ptr @allocator()
; CHECK-NEXT: ret i1 false
;
%p = alloca i64
- %q = call i64* @allocator()
- %cmp = icmp eq i64* %p, %q
+ %q = call ptr @allocator()
+ %cmp = icmp eq ptr %p, %q
ret i1 %cmp
}
@@ -129,16 +128,16 @@ define i1 @alloca_call_compare() {
; These two functions represent either a) forging a pointer via inttoptr or
; b) indexing off an adjacent allocation. In either case, the operation is
; obscured by an uninlined helper and not visible to instcombine.
-declare i8* @hidden_inttoptr()
-declare i8* @hidden_offset(i8* %other)
+declare ptr @hidden_inttoptr()
+declare ptr @hidden_offset(ptr %other)
define i1 @ptrtoint_single_cmp() {
; CHECK-LABEL: @ptrtoint_single_cmp(
; CHECK-NEXT: ret i1 false
;
%m = alloca i8, i32 4
- %rhs = inttoptr i64 2048 to i8*
- %cmp = icmp eq i8* %m, %rhs
+ %rhs = inttoptr i64 2048 to ptr
+ %cmp = icmp eq ptr %m, %rhs
ret i1 %cmp
}
@@ -148,8 +147,8 @@ define i1 @offset_single_cmp() {
;
%m = alloca i8, i32 4
%n = alloca i8, i32 4
- %rhs = getelementptr i8, i8* %n, i32 4
- %cmp = icmp eq i8* %m, %rhs
+ %rhs = getelementptr i8, ptr %n, i32 4
+ %cmp = icmp eq ptr %m, %rhs
ret i1 %cmp
}
@@ -158,18 +157,17 @@ declare void @witness(i1, i1)
define void @neg_consistent_fold1() {
; CHECK-LABEL: @neg_consistent_fold1(
; CHECK-NEXT: [[M1:%.*]] = alloca [4 x i8], align 1
-; CHECK-NEXT: [[M1_SUB:%.*]] = getelementptr inbounds [4 x i8], [4 x i8]* [[M1]], i32 0, i32 0
-; CHECK-NEXT: [[RHS2:%.*]] = call i8* @hidden_inttoptr()
-; CHECK-NEXT: [[CMP1:%.*]] = icmp eq i8* [[M1_SUB]], inttoptr (i64 2048 to i8*)
-; CHECK-NEXT: [[CMP2:%.*]] = icmp eq i8* [[M1_SUB]], [[RHS2]]
+; CHECK-NEXT: [[RHS2:%.*]] = call ptr @hidden_inttoptr()
+; CHECK-NEXT: [[CMP1:%.*]] = icmp eq ptr [[M1]], inttoptr (i64 2048 to ptr)
+; CHECK-NEXT: [[CMP2:%.*]] = icmp eq ptr [[M1]], [[RHS2]]
; CHECK-NEXT: call void @witness(i1 [[CMP1]], i1 [[CMP2]])
; CHECK-NEXT: ret void
;
%m = alloca i8, i32 4
- %rhs = inttoptr i64 2048 to i8*
- %rhs2 = call i8* @hidden_inttoptr()
- %cmp1 = icmp eq i8* %m, %rhs
- %cmp2 = icmp eq i8* %m, %rhs2
+ %rhs = inttoptr i64 2048 to ptr
+ %rhs2 = call ptr @hidden_inttoptr()
+ %cmp1 = icmp eq ptr %m, %rhs
+ %cmp2 = icmp eq ptr %m, %rhs2
call void @witness(i1 %cmp1, i1 %cmp2)
ret void
}
@@ -178,42 +176,38 @@ define void @neg_consistent_fold2() {
; CHECK-LABEL: @neg_consistent_fold2(
; CHECK-NEXT: [[M1:%.*]] = alloca [4 x i8], align 1
; CHECK-NEXT: [[N2:%.*]] = alloca [4 x i8], align 1
-; CHECK-NEXT: [[N2_SUB:%.*]] = getelementptr inbounds [4 x i8], [4 x i8]* [[N2]], i32 0, i32 0
-; CHECK-NEXT: [[M1_SUB:%.*]] = getelementptr inbounds [4 x i8], [4 x i8]* [[M1]], i32 0, i32 0
-; CHECK-NEXT: [[RHS:%.*]] = getelementptr inbounds [4 x i8], [4 x i8]* [[N2]], i32 0, i32 4
-; CHECK-NEXT: [[RHS2:%.*]] = call i8* @hidden_offset(i8* nonnull [[N2_SUB]])
-; CHECK-NEXT: [[CMP1:%.*]] = icmp eq i8* [[M1_SUB]], [[RHS]]
-; CHECK-NEXT: [[CMP2:%.*]] = icmp eq i8* [[M1_SUB]], [[RHS2]]
+; CHECK-NEXT: [[RHS:%.*]] = getelementptr inbounds i8, ptr [[N2]], i32 4
+; CHECK-NEXT: [[RHS2:%.*]] = call ptr @hidden_offset(ptr nonnull [[N2]])
+; CHECK-NEXT: [[CMP1:%.*]] = icmp eq ptr [[M1]], [[RHS]]
+; CHECK-NEXT: [[CMP2:%.*]] = icmp eq ptr [[M1]], [[RHS2]]
; CHECK-NEXT: call void @witness(i1 [[CMP1]], i1 [[CMP2]])
; CHECK-NEXT: ret void
;
%m = alloca i8, i32 4
%n = alloca i8, i32 4
- %rhs = getelementptr i8, i8* %n, i32 4
- %rhs2 = call i8* @hidden_offset(i8* %n)
- %cmp1 = icmp eq i8* %m, %rhs
- %cmp2 = icmp eq i8* %m, %rhs2
+ %rhs = getelementptr i8, ptr %n, i32 4
+ %rhs2 = call ptr @hidden_offset(ptr %n)
+ %cmp1 = icmp eq ptr %m, %rhs
+ %cmp2 = icmp eq ptr %m, %rhs2
call void @witness(i1 %cmp1, i1 %cmp2)
ret void
}
define void @neg_consistent_fold3() {
; CHECK-LABEL: @neg_consistent_fold3(
-; CHECK-NEXT: [[M1:%.*]] = alloca i32, align 1
-; CHECK-NEXT: [[M1_SUB:%.*]] = bitcast i32* [[M1]] to i8*
-; CHECK-NEXT: [[LGP:%.*]] = load i32*, i32** @gp, align 8
-; CHECK-NEXT: [[RHS2:%.*]] = call i8* @hidden_inttoptr()
-; CHECK-NEXT: [[CMP1:%.*]] = icmp eq i32* [[M1]], [[LGP]]
-; CHECK-NEXT: [[CMP2:%.*]] = icmp eq i8* [[RHS2]], [[M1_SUB]]
+; CHECK-NEXT: [[M1:%.*]] = alloca [4 x i8], align 1
+; CHECK-NEXT: [[LGP:%.*]] = load ptr, ptr @gp, align 8
+; CHECK-NEXT: [[RHS2:%.*]] = call ptr @hidden_inttoptr()
+; CHECK-NEXT: [[CMP1:%.*]] = icmp eq ptr [[M1]], [[LGP]]
+; CHECK-NEXT: [[CMP2:%.*]] = icmp eq ptr [[M1]], [[RHS2]]
; CHECK-NEXT: call void @witness(i1 [[CMP1]], i1 [[CMP2]])
; CHECK-NEXT: ret void
;
%m = alloca i8, i32 4
- %bc = bitcast i8* %m to i32*
- %lgp = load i32*, i32** @gp, align 8
- %rhs2 = call i8* @hidden_inttoptr()
- %cmp1 = icmp eq i32* %bc, %lgp
- %cmp2 = icmp eq i8* %m, %rhs2
+ %lgp = load ptr, ptr @gp, align 8
+ %rhs2 = call ptr @hidden_inttoptr()
+ %cmp1 = icmp eq ptr %m, %lgp
+ %cmp2 = icmp eq ptr %m, %rhs2
call void @witness(i1 %cmp1, i1 %cmp2)
ret void
}
@@ -224,10 +218,9 @@ define void @neg_consistent_fold4() {
; CHECK-NEXT: ret void
;
%m = alloca i8, i32 4
- %bc = bitcast i8* %m to i32*
- %lgp = load i32*, i32** @gp, align 8
- %cmp1 = icmp eq i32* %bc, %lgp
- %cmp2 = icmp eq i32* %bc, %lgp
+ %lgp = load ptr, ptr @gp, align 8
+ %cmp1 = icmp eq ptr %m, %lgp
+ %cmp2 = icmp eq ptr %m, %lgp
call void @witness(i1 %cmp1, i1 %cmp2)
ret void
}
@@ -236,54 +229,50 @@ define void @neg_consistent_fold4() {
; assumption) not able to contain a comparison which might capture the
; address.
-declare void @unknown(i8*)
+declare void @unknown(ptr)
; TODO: Missing optimization
define i1 @consistent_nocapture_inttoptr() {
; CHECK-LABEL: @consistent_nocapture_inttoptr(
; CHECK-NEXT: [[M1:%.*]] = alloca [4 x i8], align 1
-; CHECK-NEXT: [[M1_SUB:%.*]] = getelementptr inbounds [4 x i8], [4 x i8]* [[M1]], i32 0, i32 0
-; CHECK-NEXT: call void @unknown(i8* nocapture nonnull [[M1_SUB]])
-; CHECK-NEXT: [[CMP:%.*]] = icmp eq i8* [[M1_SUB]], inttoptr (i64 2048 to i8*)
+; CHECK-NEXT: call void @unknown(ptr nocapture nonnull [[M1]])
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr [[M1]], inttoptr (i64 2048 to ptr)
; CHECK-NEXT: ret i1 [[CMP]]
;
%m = alloca i8, i32 4
- call void @unknown(i8* nocapture %m)
- %rhs = inttoptr i64 2048 to i8*
- %cmp = icmp eq i8* %m, %rhs
+ call void @unknown(ptr nocapture %m)
+ %rhs = inttoptr i64 2048 to ptr
+ %cmp = icmp eq ptr %m, %rhs
ret i1 %cmp
}
define i1 @consistent_nocapture_offset() {
; CHECK-LABEL: @consistent_nocapture_offset(
; CHECK-NEXT: [[M1:%.*]] = alloca [4 x i8], align 1
-; CHECK-NEXT: [[M1_SUB:%.*]] = getelementptr inbounds [4 x i8], [4 x i8]* [[M1]], i32 0, i32 0
-; CHECK-NEXT: call void @unknown(i8* nocapture nonnull [[M1_SUB]])
+; CHECK-NEXT: call void @unknown(ptr nocapture nonnull [[M1]])
; CHECK-NEXT: ret i1 false
;
%m = alloca i8, i32 4
- call void @unknown(i8* nocapture %m)
+ call void @unknown(ptr nocapture %m)
%n = alloca i8, i32 4
- %rhs = getelementptr i8, i8* %n, i32 4
- %cmp = icmp eq i8* %m, %rhs
+ %rhs = getelementptr i8, ptr %n, i32 4
+ %cmp = icmp eq ptr %m, %rhs
ret i1 %cmp
}
-@gp = global i32* null, align 8
+@gp = global ptr null, align 8
; TODO: Missing optimization
define i1 @consistent_nocapture_through_global() {
; CHECK-LABEL: @consistent_nocapture_through_global(
-; CHECK-NEXT: [[M1:%.*]] = alloca i32, align 1
-; CHECK-NEXT: [[M1_SUB:%.*]] = bitcast i32* [[M1]] to i8*
-; CHECK-NEXT: call void @unknown(i8* nocapture nonnull [[M1_SUB]])
-; CHECK-NEXT: [[LGP:%.*]] = load i32*, i32** @gp, align 8, !nonnull !0
-; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32* [[M1]], [[LGP]]
+; CHECK-NEXT: [[M1:%.*]] = alloca [4 x i8], align 1
+; CHECK-NEXT: call void @unknown(ptr nocapture nonnull [[M1]])
+; CHECK-NEXT: [[LGP:%.*]] = load ptr, ptr @gp, align 8, !nonnull !0
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr [[M1]], [[LGP]]
; CHECK-NEXT: ret i1 [[CMP]]
;
%m = alloca i8, i32 4
- call void @unknown(i8* nocapture %m)
- %bc = bitcast i8* %m to i32*
- %lgp = load i32*, i32** @gp, align 8, !nonnull !{}
- %cmp = icmp eq i32* %bc, %lgp
+ call void @unknown(ptr nocapture %m)
+ %lgp = load ptr, ptr @gp, align 8, !nonnull !{}
+ %cmp = icmp eq ptr %m, %lgp
ret i1 %cmp
}
diff --git a/llvm/test/Transforms/InstCombine/compare-unescaped.ll b/llvm/test/Transforms/InstCombine/compare-unescaped.ll
index 2d459046c0a8..e661cf9d31c5 100644
--- a/llvm/test/Transforms/InstCombine/compare-unescaped.ll
+++ b/llvm/test/Transforms/InstCombine/compare-unescaped.ll
@@ -1,18 +1,17 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -passes=instcombine -S < %s | FileCheck %s
-@gp = global i32* null, align 8
+@gp = global ptr null, align 8
-declare noalias i8* @malloc(i64) allockind("alloc,uninitialized") allocsize(0)
+declare noalias ptr @malloc(i64) allockind("alloc,uninitialized") allocsize(0)
define i1 @compare_global_trivialeq() {
; CHECK-LABEL: @compare_global_trivialeq(
; CHECK-NEXT: ret i1 false
;
- %m = call i8* @malloc(i64 4)
- %bc = bitcast i8* %m to i32*
- %lgp = load i32*, i32** @gp, align 8
- %cmp = icmp eq i32* %bc, %lgp
+ %m = call ptr @malloc(i64 4)
+ %lgp = load ptr, ptr @gp, align 8
+ %cmp = icmp eq ptr %m, %lgp
ret i1 %cmp
}
@@ -20,10 +19,9 @@ define i1 @compare_global_trivialne() {
; CHECK-LABEL: @compare_global_trivialne(
; CHECK-NEXT: ret i1 true
;
- %m = call i8* @malloc(i64 4)
- %bc = bitcast i8* %m to i32*
- %lgp = load i32*, i32** @gp, align 8
- %cmp = icmp ne i32* %bc, %lgp
+ %m = call ptr @malloc(i64 4)
+ %lgp = load ptr, ptr @gp, align 8
+ %cmp = icmp ne ptr %m, %lgp
ret i1 %cmp
}
@@ -34,75 +32,69 @@ define i1 @compare_global_trivialne() {
declare void @f()
define i1 @compare_and_call_with_deopt() {
; CHECK-LABEL: @compare_and_call_with_deopt(
-; CHECK-NEXT: [[M:%.*]] = call dereferenceable_or_null(24) i8* @malloc(i64 24)
-; CHECK-NEXT: tail call void @f() [ "deopt"(i8* [[M]]) ]
+; CHECK-NEXT: [[M:%.*]] = call dereferenceable_or_null(24) ptr @malloc(i64 24)
+; CHECK-NEXT: tail call void @f() [ "deopt"(ptr [[M]]) ]
; CHECK-NEXT: ret i1 false
;
- %m = call i8* @malloc(i64 24)
- %bc = bitcast i8* %m to i32*
- %lgp = load i32*, i32** @gp, align 8, !nonnull !0
- %cmp = icmp eq i32* %lgp, %bc
- tail call void @f() [ "deopt"(i8* %m) ]
+ %m = call ptr @malloc(i64 24)
+ %lgp = load ptr, ptr @gp, align 8, !nonnull !0
+ %cmp = icmp eq ptr %lgp, %m
+ tail call void @f() [ "deopt"(ptr %m) ]
ret i1 %cmp
}
; Same function as above with deopt operand in function f, but comparison is NE
define i1 @compare_ne_and_call_with_deopt() {
; CHECK-LABEL: @compare_ne_and_call_with_deopt(
-; CHECK-NEXT: [[M:%.*]] = call dereferenceable_or_null(24) i8* @malloc(i64 24)
-; CHECK-NEXT: tail call void @f() [ "deopt"(i8* [[M]]) ]
+; CHECK-NEXT: [[M:%.*]] = call dereferenceable_or_null(24) ptr @malloc(i64 24)
+; CHECK-NEXT: tail call void @f() [ "deopt"(ptr [[M]]) ]
; CHECK-NEXT: ret i1 true
;
- %m = call i8* @malloc(i64 24)
- %bc = bitcast i8* %m to i32*
- %lgp = load i32*, i32** @gp, align 8, !nonnull !0
- %cmp = icmp ne i32* %lgp, %bc
- tail call void @f() [ "deopt"(i8* %m) ]
+ %m = call ptr @malloc(i64 24)
+ %lgp = load ptr, ptr @gp, align 8, !nonnull !0
+ %cmp = icmp ne ptr %lgp, %m
+ tail call void @f() [ "deopt"(ptr %m) ]
ret i1 %cmp
}
; Same function as above, but global not marked nonnull, and we cannot fold the comparison
define i1 @compare_ne_global_maybe_null() {
; CHECK-LABEL: @compare_ne_global_maybe_null(
-; CHECK-NEXT: [[M:%.*]] = call dereferenceable_or_null(24) i8* @malloc(i64 24)
-; CHECK-NEXT: [[BC:%.*]] = bitcast i8* [[M]] to i32*
-; CHECK-NEXT: [[LGP:%.*]] = load i32*, i32** @gp, align 8
-; CHECK-NEXT: [[CMP:%.*]] = icmp ne i32* [[LGP]], [[BC]]
-; CHECK-NEXT: tail call void @f() [ "deopt"(i8* [[M]]) ]
+; CHECK-NEXT: [[M:%.*]] = call dereferenceable_or_null(24) ptr @malloc(i64 24)
+; CHECK-NEXT: [[LGP:%.*]] = load ptr, ptr @gp, align 8
+; CHECK-NEXT: [[CMP:%.*]] = icmp ne ptr [[LGP]], [[M]]
+; CHECK-NEXT: tail call void @f() [ "deopt"(ptr [[M]]) ]
; CHECK-NEXT: ret i1 [[CMP]]
;
- %m = call i8* @malloc(i64 24)
- %bc = bitcast i8* %m to i32*
- %lgp = load i32*, i32** @gp
- %cmp = icmp ne i32* %lgp, %bc
- tail call void @f() [ "deopt"(i8* %m) ]
+ %m = call ptr @malloc(i64 24)
+ %lgp = load ptr, ptr @gp
+ %cmp = icmp ne ptr %lgp, %m
+ tail call void @f() [ "deopt"(ptr %m) ]
ret i1 %cmp
}
; FIXME: The comparison should fold to false since %m escapes (call to function escape)
; after the comparison.
-declare void @escape(i8*)
+declare void @escape(ptr)
define i1 @compare_and_call_after() {
; CHECK-LABEL: @compare_and_call_after(
-; CHECK-NEXT: [[M:%.*]] = call dereferenceable_or_null(24) i8* @malloc(i64 24)
-; CHECK-NEXT: [[BC:%.*]] = bitcast i8* [[M]] to i32*
-; CHECK-NEXT: [[LGP:%.*]] = load i32*, i32** @gp, align 8, !nonnull !0
-; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32* [[LGP]], [[BC]]
+; CHECK-NEXT: [[M:%.*]] = call dereferenceable_or_null(24) ptr @malloc(i64 24)
+; CHECK-NEXT: [[LGP:%.*]] = load ptr, ptr @gp, align 8, !nonnull !0
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr [[M]], [[LGP]]
; CHECK-NEXT: br i1 [[CMP]], label [[ESCAPE_CALL:%.*]], label [[JUST_RETURN:%.*]]
; CHECK: escape_call:
-; CHECK-NEXT: call void @escape(i8* [[M]])
+; CHECK-NEXT: call void @escape(ptr [[M]])
; CHECK-NEXT: ret i1 true
; CHECK: just_return:
; CHECK-NEXT: ret i1 [[CMP]]
;
- %m = call i8* @malloc(i64 24)
- %bc = bitcast i8* %m to i32*
- %lgp = load i32*, i32** @gp, align 8, !nonnull !0
- %cmp = icmp eq i32* %bc, %lgp
+ %m = call ptr @malloc(i64 24)
+ %lgp = load ptr, ptr @gp, align 8, !nonnull !0
+ %cmp = icmp eq ptr %m, %lgp
br i1 %cmp, label %escape_call, label %just_return
escape_call:
- call void @escape(i8* %m)
+ call void @escape(ptr %m)
ret i1 true
just_return:
@@ -113,9 +105,9 @@ define i1 @compare_distinct_mallocs() {
; CHECK-LABEL: @compare_distinct_mallocs(
; CHECK-NEXT: ret i1 false
;
- %m = call i8* @malloc(i64 4)
- %n = call i8* @malloc(i64 4)
- %cmp = icmp eq i8* %m, %n
+ %m = call ptr @malloc(i64 4)
+ %n = call ptr @malloc(i64 4)
+ %cmp = icmp eq ptr %m, %n
ret i1 %cmp
}
@@ -125,10 +117,8 @@ define i1 @compare_samepointer_under_bitcast() {
; CHECK-LABEL: @compare_samepointer_under_bitcast(
; CHECK-NEXT: ret i1 true
;
- %m = call i8* @malloc(i64 4)
- %bc = bitcast i8* %m to i32*
- %bcback = bitcast i32* %bc to i8*
- %cmp = icmp eq i8* %m, %bcback
+ %m = call ptr @malloc(i64 4)
+ %cmp = icmp eq ptr %m, %m
ret i1 %cmp
}
@@ -136,15 +126,13 @@ define i1 @compare_samepointer_under_bitcast() {
; The malloc call for %m cannot be elided since it is used in the call to function f.
define i1 @compare_samepointer_escaped() {
; CHECK-LABEL: @compare_samepointer_escaped(
-; CHECK-NEXT: [[M:%.*]] = call dereferenceable_or_null(4) i8* @malloc(i64 4)
-; CHECK-NEXT: call void @f() [ "deopt"(i8* [[M]]) ]
+; CHECK-NEXT: [[M:%.*]] = call dereferenceable_or_null(4) ptr @malloc(i64 4)
+; CHECK-NEXT: call void @f() [ "deopt"(ptr [[M]]) ]
; CHECK-NEXT: ret i1 true
;
- %m = call i8* @malloc(i64 4)
- %bc = bitcast i8* %m to i32*
- %bcback = bitcast i32* %bc to i8*
- %cmp = icmp eq i8* %m, %bcback
- call void @f() [ "deopt"(i8* %m) ]
+ %m = call ptr @malloc(i64 4)
+ %cmp = icmp eq ptr %m, %m
+ call void @f() [ "deopt"(ptr %m) ]
ret i1 %cmp
}
@@ -154,52 +142,50 @@ define i1 @compare_samepointer_escaped() {
; FIXME: Folding this %cmp2 when %m escapes through ret could be an issue with
; cross-threading data dependencies since we do not make the distinction between
; atomic and non-atomic loads in capture tracking.
-define i8* @compare_ret_escape(i8* %c) {
+define ptr @compare_ret_escape(ptr %c) {
; CHECK-LABEL: @compare_ret_escape(
-; CHECK-NEXT: [[M:%.*]] = call dereferenceable_or_null(4) i8* @malloc(i64 4)
-; CHECK-NEXT: [[N:%.*]] = call dereferenceable_or_null(4) i8* @malloc(i64 4)
-; CHECK-NEXT: [[CMP:%.*]] = icmp eq i8* [[N]], [[C:%.*]]
+; CHECK-NEXT: [[M:%.*]] = call dereferenceable_or_null(4) ptr @malloc(i64 4)
+; CHECK-NEXT: [[N:%.*]] = call dereferenceable_or_null(4) ptr @malloc(i64 4)
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr [[N]], [[C:%.*]]
; CHECK-NEXT: br i1 [[CMP]], label [[RETST:%.*]], label [[CHK:%.*]]
; CHECK: retst:
-; CHECK-NEXT: ret i8* [[M]]
+; CHECK-NEXT: ret ptr [[M]]
; CHECK: chk:
-; CHECK-NEXT: [[BC:%.*]] = bitcast i8* [[M]] to i32*
-; CHECK-NEXT: [[LGP:%.*]] = load i32*, i32** @gp, align 8, !nonnull !0
-; CHECK-NEXT: [[CMP2:%.*]] = icmp eq i32* [[LGP]], [[BC]]
+; CHECK-NEXT: [[LGP:%.*]] = load ptr, ptr @gp, align 8, !nonnull !0
+; CHECK-NEXT: [[CMP2:%.*]] = icmp eq ptr [[M]], [[LGP]]
; CHECK-NEXT: br i1 [[CMP2]], label [[RETST]], label [[CHK2:%.*]]
; CHECK: chk2:
-; CHECK-NEXT: ret i8* [[N]]
+; CHECK-NEXT: ret ptr [[N]]
;
- %m = call i8* @malloc(i64 4)
- %n = call i8* @malloc(i64 4)
- %cmp = icmp eq i8* %n, %c
+ %m = call ptr @malloc(i64 4)
+ %n = call ptr @malloc(i64 4)
+ %cmp = icmp eq ptr %n, %c
br i1 %cmp, label %retst, label %chk
retst:
- ret i8* %m
+ ret ptr %m
chk:
- %bc = bitcast i8* %m to i32*
- %lgp = load i32*, i32** @gp, align 8, !nonnull !0
- %cmp2 = icmp eq i32* %bc, %lgp
+ %lgp = load ptr, ptr @gp, align 8, !nonnull !0
+ %cmp2 = icmp eq ptr %m, %lgp
br i1 %cmp2, label %retst, label %chk2
chk2:
- ret i8* %n
+ ret ptr %n
}
; The malloc call for %m cannot be elided since it is used in the call to function f.
; However, the cmp can be folded to true as %n doesn't escape and %m, %n are distinct allocations
define i1 @compare_distinct_pointer_escape() {
; CHECK-LABEL: @compare_distinct_pointer_escape(
-; CHECK-NEXT: [[M:%.*]] = call dereferenceable_or_null(4) i8* @malloc(i64 4)
-; CHECK-NEXT: tail call void @f() [ "deopt"(i8* [[M]]) ]
+; CHECK-NEXT: [[M:%.*]] = call dereferenceable_or_null(4) ptr @malloc(i64 4)
+; CHECK-NEXT: tail call void @f() [ "deopt"(ptr [[M]]) ]
; CHECK-NEXT: ret i1 true
;
- %m = call i8* @malloc(i64 4)
- %n = call i8* @malloc(i64 4)
- tail call void @f() [ "deopt"(i8* %m) ]
- %cmp = icmp ne i8* %m, %n
+ %m = call ptr @malloc(i64 4)
+ %n = call ptr @malloc(i64 4)
+ tail call void @f() [ "deopt"(ptr %m) ]
+ %cmp = icmp ne ptr %m, %n
ret i1 %cmp
}
@@ -213,19 +199,19 @@ define i1 @compare_distinct_pointer_escape() {
; These two functions represent either a) forging a pointer via inttoptr or
; b) indexing off an adjacent allocation. In either case, the operation is
; obscured by an uninlined helper and not visible to instcombine.
-declare i8* @hidden_inttoptr()
-declare i8* @hidden_offset(i8* %other)
+declare ptr @hidden_inttoptr()
+declare ptr @hidden_offset(ptr %other)
; FIXME: Missed opportunity
define i1 @ptrtoint_single_cmp() {
; CHECK-LABEL: @ptrtoint_single_cmp(
-; CHECK-NEXT: [[M:%.*]] = call dereferenceable_or_null(4) i8* @malloc(i64 4)
-; CHECK-NEXT: [[CMP:%.*]] = icmp eq i8* [[M]], inttoptr (i64 2048 to i8*)
+; CHECK-NEXT: [[M:%.*]] = call dereferenceable_or_null(4) ptr @malloc(i64 4)
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr [[M]], inttoptr (i64 2048 to ptr)
; CHECK-NEXT: ret i1 [[CMP]]
;
- %m = call i8* @malloc(i64 4)
- %rhs = inttoptr i64 2048 to i8*
- %cmp = icmp eq i8* %m, %rhs
+ %m = call ptr @malloc(i64 4)
+ %rhs = inttoptr i64 2048 to ptr
+ %cmp = icmp eq ptr %m, %rhs
ret i1 %cmp
}
@@ -233,10 +219,10 @@ define i1 @offset_single_cmp() {
; CHECK-LABEL: @offset_single_cmp(
; CHECK-NEXT: ret i1 false
;
- %m = call i8* @malloc(i64 4)
- %n = call i8* @malloc(i64 4)
- %rhs = getelementptr i8, i8* %n, i32 4
- %cmp = icmp eq i8* %m, %rhs
+ %m = call ptr @malloc(i64 4)
+ %n = call ptr @malloc(i64 4)
+ %rhs = getelementptr i8, ptr %n, i32 4
+ %cmp = icmp eq ptr %m, %rhs
ret i1 %cmp
}
@@ -244,60 +230,58 @@ declare void @witness(i1, i1)
define void @neg_consistent_fold1() {
; CHECK-LABEL: @neg_consistent_fold1(
-; CHECK-NEXT: [[M:%.*]] = call dereferenceable_or_null(4) i8* @malloc(i64 4)
-; CHECK-NEXT: [[RHS2:%.*]] = call i8* @hidden_inttoptr()
-; CHECK-NEXT: [[CMP1:%.*]] = icmp eq i8* [[M]], inttoptr (i64 2048 to i8*)
-; CHECK-NEXT: [[CMP2:%.*]] = icmp eq i8* [[M]], [[RHS2]]
+; CHECK-NEXT: [[M:%.*]] = call dereferenceable_or_null(4) ptr @malloc(i64 4)
+; CHECK-NEXT: [[RHS2:%.*]] = call ptr @hidden_inttoptr()
+; CHECK-NEXT: [[CMP1:%.*]] = icmp eq ptr [[M]], inttoptr (i64 2048 to ptr)
+; CHECK-NEXT: [[CMP2:%.*]] = icmp eq ptr [[M]], [[RHS2]]
; CHECK-NEXT: call void @witness(i1 [[CMP1]], i1 [[CMP2]])
; CHECK-NEXT: ret void
;
- %m = call i8* @malloc(i64 4)
- %rhs = inttoptr i64 2048 to i8*
- %rhs2 = call i8* @hidden_inttoptr()
- %cmp1 = icmp eq i8* %m, %rhs
- %cmp2 = icmp eq i8* %m, %rhs2
+ %m = call ptr @malloc(i64 4)
+ %rhs = inttoptr i64 2048 to ptr
+ %rhs2 = call ptr @hidden_inttoptr()
+ %cmp1 = icmp eq ptr %m, %rhs
+ %cmp2 = icmp eq ptr %m, %rhs2
call void @witness(i1 %cmp1, i1 %cmp2)
ret void
}
define void @neg_consistent_fold2() {
; CHECK-LABEL: @neg_consistent_fold2(
-; CHECK-NEXT: [[M:%.*]] = call dereferenceable_or_null(4) i8* @malloc(i64 4)
-; CHECK-NEXT: [[N:%.*]] = call dereferenceable_or_null(4) i8* @malloc(i64 4)
-; CHECK-NEXT: [[RHS:%.*]] = getelementptr i8, i8* [[N]], i64 4
-; CHECK-NEXT: [[RHS2:%.*]] = call i8* @hidden_offset(i8* [[N]])
-; CHECK-NEXT: [[CMP1:%.*]] = icmp eq i8* [[M]], [[RHS]]
-; CHECK-NEXT: [[CMP2:%.*]] = icmp eq i8* [[M]], [[RHS2]]
+; CHECK-NEXT: [[M:%.*]] = call dereferenceable_or_null(4) ptr @malloc(i64 4)
+; CHECK-NEXT: [[N:%.*]] = call dereferenceable_or_null(4) ptr @malloc(i64 4)
+; CHECK-NEXT: [[RHS:%.*]] = getelementptr i8, ptr [[N]], i64 4
+; CHECK-NEXT: [[RHS2:%.*]] = call ptr @hidden_offset(ptr [[N]])
+; CHECK-NEXT: [[CMP1:%.*]] = icmp eq ptr [[M]], [[RHS]]
+; CHECK-NEXT: [[CMP2:%.*]] = icmp eq ptr [[M]], [[RHS2]]
; CHECK-NEXT: call void @witness(i1 [[CMP1]], i1 [[CMP2]])
; CHECK-NEXT: ret void
;
- %m = call i8* @malloc(i64 4)
- %n = call i8* @malloc(i64 4)
- %rhs = getelementptr i8, i8* %n, i32 4
- %rhs2 = call i8* @hidden_offset(i8* %n)
- %cmp1 = icmp eq i8* %m, %rhs
- %cmp2 = icmp eq i8* %m, %rhs2
+ %m = call ptr @malloc(i64 4)
+ %n = call ptr @malloc(i64 4)
+ %rhs = getelementptr i8, ptr %n, i32 4
+ %rhs2 = call ptr @hidden_offset(ptr %n)
+ %cmp1 = icmp eq ptr %m, %rhs
+ %cmp2 = icmp eq ptr %m, %rhs2
call void @witness(i1 %cmp1, i1 %cmp2)
ret void
}
define void @neg_consistent_fold3() {
; CHECK-LABEL: @neg_consistent_fold3(
-; CHECK-NEXT: [[M:%.*]] = call dereferenceable_or_null(4) i8* @malloc(i64 4)
-; CHECK-NEXT: [[BC:%.*]] = bitcast i8* [[M]] to i32*
-; CHECK-NEXT: [[LGP:%.*]] = load i32*, i32** @gp, align 8
-; CHECK-NEXT: [[RHS2:%.*]] = call i8* @hidden_inttoptr()
-; CHECK-NEXT: [[CMP1:%.*]] = icmp eq i32* [[LGP]], [[BC]]
-; CHECK-NEXT: [[CMP2:%.*]] = icmp eq i8* [[M]], [[RHS2]]
+; CHECK-NEXT: [[M:%.*]] = call dereferenceable_or_null(4) ptr @malloc(i64 4)
+; CHECK-NEXT: [[LGP:%.*]] = load ptr, ptr @gp, align 8
+; CHECK-NEXT: [[RHS2:%.*]] = call ptr @hidden_inttoptr()
+; CHECK-NEXT: [[CMP1:%.*]] = icmp eq ptr [[M]], [[LGP]]
+; CHECK-NEXT: [[CMP2:%.*]] = icmp eq ptr [[M]], [[RHS2]]
; CHECK-NEXT: call void @witness(i1 [[CMP1]], i1 [[CMP2]])
; CHECK-NEXT: ret void
;
- %m = call i8* @malloc(i64 4)
- %bc = bitcast i8* %m to i32*
- %lgp = load i32*, i32** @gp, align 8
- %rhs2 = call i8* @hidden_inttoptr()
- %cmp1 = icmp eq i32* %bc, %lgp
- %cmp2 = icmp eq i8* %m, %rhs2
+ %m = call ptr @malloc(i64 4)
+ %lgp = load ptr, ptr @gp, align 8
+ %rhs2 = call ptr @hidden_inttoptr()
+ %cmp1 = icmp eq ptr %m, %lgp
+ %cmp2 = icmp eq ptr %m, %rhs2
call void @witness(i1 %cmp1, i1 %cmp2)
ret void
}
@@ -310,16 +294,15 @@ define void @neg_consistent_fold4() {
; CHECK-NEXT: call void @witness(i1 false, i1 false)
; CHECK-NEXT: ret void
;
- %m = call i8* @malloc(i64 4)
- %bc = bitcast i8* %m to i32*
- %lgp = load i32*, i32** @gp, align 8
- %cmp1 = icmp eq i32* %bc, %lgp
- %cmp2 = icmp eq i32* %bc, %lgp
+ %m = call ptr @malloc(i64 4)
+ %lgp = load ptr, ptr @gp, align 8
+ %cmp1 = icmp eq ptr %m, %lgp
+ %cmp2 = icmp eq ptr %m, %lgp
call void @witness(i1 %cmp1, i1 %cmp2)
ret void
}
-declare void @unknown(i8*)
+declare void @unknown(ptr)
; Points out that a nocapture call can't cause a consistent result issue
; as it is (by assumption) not able to contain a comparison which might
@@ -327,43 +310,42 @@ declare void @unknown(i8*)
define i1 @consistent_nocapture_inttoptr() {
; CHECK-LABEL: @consistent_nocapture_inttoptr(
-; CHECK-NEXT: [[M:%.*]] = call dereferenceable_or_null(4) i8* @malloc(i64 4)
-; CHECK-NEXT: call void @unknown(i8* nocapture [[M]])
-; CHECK-NEXT: [[CMP:%.*]] = icmp eq i8* [[M]], inttoptr (i64 2048 to i8*)
+; CHECK-NEXT: [[M:%.*]] = call dereferenceable_or_null(4) ptr @malloc(i64 4)
+; CHECK-NEXT: call void @unknown(ptr nocapture [[M]])
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr [[M]], inttoptr (i64 2048 to ptr)
; CHECK-NEXT: ret i1 [[CMP]]
;
- %m = call i8* @malloc(i64 4)
- call void @unknown(i8* nocapture %m)
- %rhs = inttoptr i64 2048 to i8*
- %cmp = icmp eq i8* %m, %rhs
+ %m = call ptr @malloc(i64 4)
+ call void @unknown(ptr nocapture %m)
+ %rhs = inttoptr i64 2048 to ptr
+ %cmp = icmp eq ptr %m, %rhs
ret i1 %cmp
}
define i1 @consistent_nocapture_offset() {
; CHECK-LABEL: @consistent_nocapture_offset(
-; CHECK-NEXT: [[M:%.*]] = call dereferenceable_or_null(4) i8* @malloc(i64 4)
-; CHECK-NEXT: call void @unknown(i8* nocapture [[M]])
+; CHECK-NEXT: [[M:%.*]] = call dereferenceable_or_null(4) ptr @malloc(i64 4)
+; CHECK-NEXT: call void @unknown(ptr nocapture [[M]])
; CHECK-NEXT: ret i1 false
;
- %m = call i8* @malloc(i64 4)
- call void @unknown(i8* nocapture %m)
- %n = call i8* @malloc(i64 4)
- %rhs = getelementptr i8, i8* %n, i32 4
- %cmp = icmp eq i8* %m, %rhs
+ %m = call ptr @malloc(i64 4)
+ call void @unknown(ptr nocapture %m)
+ %n = call ptr @malloc(i64 4)
+ %rhs = getelementptr i8, ptr %n, i32 4
+ %cmp = icmp eq ptr %m, %rhs
ret i1 %cmp
}
define i1 @consistent_nocapture_through_global() {
; CHECK-LABEL: @consistent_nocapture_through_global(
-; CHECK-NEXT: [[M:%.*]] = call dereferenceable_or_null(4) i8* @malloc(i64 4)
-; CHECK-NEXT: call void @unknown(i8* nocapture [[M]])
+; CHECK-NEXT: [[M:%.*]] = call dereferenceable_or_null(4) ptr @malloc(i64 4)
+; CHECK-NEXT: call void @unknown(ptr nocapture [[M]])
; CHECK-NEXT: ret i1 false
;
- %m = call i8* @malloc(i64 4)
- call void @unknown(i8* nocapture %m)
- %bc = bitcast i8* %m to i32*
- %lgp = load i32*, i32** @gp, align 8, !nonnull !0
- %cmp = icmp eq i32* %bc, %lgp
+ %m = call ptr @malloc(i64 4)
+ call void @unknown(ptr nocapture %m)
+ %lgp = load ptr, ptr @gp, align 8, !nonnull !0
+ %cmp = icmp eq ptr %m, %lgp
ret i1 %cmp
}
@@ -374,9 +356,9 @@ define i1 @two_nonnull_mallocs() {
; CHECK-LABEL: @two_nonnull_mallocs(
; CHECK-NEXT: ret i1 false
;
- %m = call nonnull i8* @malloc(i64 4)
- %n = call nonnull i8* @malloc(i64 4)
- %cmp = icmp eq i8* %m, %n
+ %m = call nonnull ptr @malloc(i64 4)
+ %n = call nonnull ptr @malloc(i64 4)
+ %cmp = icmp eq ptr %m, %n
ret i1 %cmp
}
@@ -384,32 +366,32 @@ define i1 @two_nonnull_mallocs() {
; the comparison non-equal.
define i1 @two_nonnull_mallocs2() {
; CHECK-LABEL: @two_nonnull_mallocs2(
-; CHECK-NEXT: [[N:%.*]] = call nonnull dereferenceable(4) i8* @malloc(i64 4)
-; CHECK-NEXT: call void @unknown(i8* nonnull [[N]])
+; CHECK-NEXT: [[N:%.*]] = call nonnull dereferenceable(4) ptr @malloc(i64 4)
+; CHECK-NEXT: call void @unknown(ptr nonnull [[N]])
; CHECK-NEXT: ret i1 false
;
- %m = call nonnull i8* @malloc(i64 4)
- %n = call nonnull i8* @malloc(i64 4)
- call void @unknown(i8* %n)
- %cmp = icmp eq i8* %m, %n
+ %m = call nonnull ptr @malloc(i64 4)
+ %n = call nonnull ptr @malloc(i64 4)
+ call void @unknown(ptr %n)
+ %cmp = icmp eq ptr %m, %n
ret i1 %cmp
}
; TODO: We can fold this, but don't with the current scheme.
define i1 @two_nonnull_mallocs_hidden() {
; CHECK-LABEL: @two_nonnull_mallocs_hidden(
-; CHECK-NEXT: [[M:%.*]] = call nonnull dereferenceable(4) i8* @malloc(i64 4)
-; CHECK-NEXT: [[N:%.*]] = call nonnull dereferenceable(4) i8* @malloc(i64 4)
-; CHECK-NEXT: [[GEP1:%.*]] = getelementptr i8, i8* [[M]], i64 1
-; CHECK-NEXT: [[GEP2:%.*]] = getelementptr i8, i8* [[N]], i64 2
-; CHECK-NEXT: [[CMP:%.*]] = icmp eq i8* [[GEP1]], [[GEP2]]
+; CHECK-NEXT: [[M:%.*]] = call nonnull dereferenceable(4) ptr @malloc(i64 4)
+; CHECK-NEXT: [[N:%.*]] = call nonnull dereferenceable(4) ptr @malloc(i64 4)
+; CHECK-NEXT: [[GEP1:%.*]] = getelementptr i8, ptr [[M]], i64 1
+; CHECK-NEXT: [[GEP2:%.*]] = getelementptr i8, ptr [[N]], i64 2
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr [[GEP1]], [[GEP2]]
; CHECK-NEXT: ret i1 [[CMP]]
;
- %m = call nonnull i8* @malloc(i64 4)
- %n = call nonnull i8* @malloc(i64 4)
- %gep1 = getelementptr i8, i8* %m, i32 1
- %gep2 = getelementptr i8, i8* %n, i32 2
- %cmp = icmp eq i8* %gep1, %gep2
+ %m = call nonnull ptr @malloc(i64 4)
+ %n = call nonnull ptr @malloc(i64 4)
+ %gep1 = getelementptr i8, ptr %m, i32 1
+ %gep2 = getelementptr i8, ptr %n, i32 2
+ %cmp = icmp eq ptr %gep1, %gep2
ret i1 %cmp
}
diff --git a/llvm/test/Transforms/InstCombine/gepphigep.ll b/llvm/test/Transforms/InstCombine/gepphigep.ll
index b4a566085cd4..22c0e5af7f41 100644
--- a/llvm/test/Transforms/InstCombine/gepphigep.ll
+++ b/llvm/test/Transforms/InstCombine/gepphigep.ll
@@ -1,95 +1,87 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -passes=instcombine -instcombine-infinite-loop-threshold=3 -S < %s | FileCheck %s
-%struct1 = type { %struct2*, i32, i32, i32 }
+%struct1 = type { ptr, i32, i32, i32 }
%struct2 = type { i32, i32 }
%struct3 = type { i32, %struct4, %struct4 }
%struct4 = type { %struct2, %struct2 }
-define i32 @test1(%struct1* %dm, i1 %tmp4, i64 %tmp9, i64 %tmp19) {
+define i32 @test1(ptr %dm, i1 %tmp4, i64 %tmp9, i64 %tmp19) {
; CHECK-LABEL: @test1(
; CHECK-NEXT: bb:
-; CHECK-NEXT: [[TMP:%.*]] = getelementptr inbounds [[STRUCT1:%.*]], %struct1* [[DM:%.*]], i64 0, i32 0
-; CHECK-NEXT: [[TMP1:%.*]] = load %struct2*, %struct2** [[TMP]], align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DM:%.*]], align 8
; CHECK-NEXT: br i1 [[TMP4:%.*]], label [[BB1:%.*]], label [[BB2:%.*]]
; CHECK: bb1:
-; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT2:%.*]], %struct2* [[TMP1]], i64 [[TMP9:%.*]], i32 0
-; CHECK-NEXT: store i32 0, i32* [[TMP11]], align 4
+; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT2:%.*]], ptr [[TMP1]], i64 [[TMP9:%.*]]
+; CHECK-NEXT: store i32 0, ptr [[TMP10]], align 4
; CHECK-NEXT: br label [[BB3:%.*]]
; CHECK: bb2:
-; CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT2]], %struct2* [[TMP1]], i64 [[TMP19:%.*]], i32 0
-; CHECK-NEXT: store i32 0, i32* [[TMP21]], align 4
+; CHECK-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT2]], ptr [[TMP1]], i64 [[TMP19:%.*]]
+; CHECK-NEXT: store i32 0, ptr [[TMP20]], align 4
; CHECK-NEXT: br label [[BB3]]
; CHECK: bb3:
; CHECK-NEXT: [[TMP0:%.*]] = phi i64 [ [[TMP9]], [[BB1]] ], [ [[TMP19]], [[BB2]] ]
-; CHECK-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT2]], %struct2* [[TMP1]], i64 [[TMP0]], i32 1
-; CHECK-NEXT: [[TMP25:%.*]] = load i32, i32* [[TMP24]], align 4
+; CHECK-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT2]], ptr [[TMP1]], i64 [[TMP0]], i32 1
+; CHECK-NEXT: [[TMP25:%.*]] = load i32, ptr [[TMP24]], align 4
; CHECK-NEXT: ret i32 [[TMP25]]
;
bb:
- %tmp = getelementptr inbounds %struct1, %struct1* %dm, i64 0, i32 0
- %tmp1 = load %struct2*, %struct2** %tmp, align 8
+ %tmp1 = load ptr, ptr %dm, align 8
br i1 %tmp4, label %bb1, label %bb2
bb1:
- %tmp10 = getelementptr inbounds %struct2, %struct2* %tmp1, i64 %tmp9
- %tmp11 = getelementptr inbounds %struct2, %struct2* %tmp10, i64 0, i32 0
- store i32 0, i32* %tmp11, align 4
+ %tmp10 = getelementptr inbounds %struct2, ptr %tmp1, i64 %tmp9
+ store i32 0, ptr %tmp10, align 4
br label %bb3
bb2:
- %tmp20 = getelementptr inbounds %struct2, %struct2* %tmp1, i64 %tmp19
- %tmp21 = getelementptr inbounds %struct2, %struct2* %tmp20, i64 0, i32 0
- store i32 0, i32* %tmp21, align 4
+ %tmp20 = getelementptr inbounds %struct2, ptr %tmp1, i64 %tmp19
+ store i32 0, ptr %tmp20, align 4
br label %bb3
bb3:
- %phi = phi %struct2* [ %tmp10, %bb1 ], [ %tmp20, %bb2 ]
- %tmp24 = getelementptr inbounds %struct2, %struct2* %phi, i64 0, i32 1
- %tmp25 = load i32, i32* %tmp24, align 4
+ %phi = phi ptr [ %tmp10, %bb1 ], [ %tmp20, %bb2 ]
+ %tmp24 = getelementptr inbounds %struct2, ptr %phi, i64 0, i32 1
+ %tmp25 = load i32, ptr %tmp24, align 4
ret i32 %tmp25
}
-define i32 @test2(%struct1* %dm, i1 %tmp4, i64 %tmp9, i64 %tmp19) {
+define i32 @test2(ptr %dm, i1 %tmp4, i64 %tmp9, i64 %tmp19) {
; CHECK-LABEL: @test2(
; CHECK-NEXT: bb:
-; CHECK-NEXT: [[TMP:%.*]] = getelementptr inbounds [[STRUCT1:%.*]], %struct1* [[DM:%.*]], i64 0, i32 0
-; CHECK-NEXT: [[TMP1:%.*]] = load %struct2*, %struct2** [[TMP]], align 8
-; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT2:%.*]], %struct2* [[TMP1]], i64 [[TMP9:%.*]], i32 0
-; CHECK-NEXT: store i32 0, i32* [[TMP11]], align 4
-; CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT2]], %struct2* [[TMP1]], i64 [[TMP19:%.*]], i32 0
-; CHECK-NEXT: store i32 0, i32* [[TMP21]], align 4
-; CHECK-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT2]], %struct2* [[TMP1]], i64 [[TMP9]], i32 1
-; CHECK-NEXT: [[TMP25:%.*]] = load i32, i32* [[TMP24]], align 4
+; CHECK-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DM:%.*]], align 8
+; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT2:%.*]], ptr [[TMP1]], i64 [[TMP9:%.*]]
+; CHECK-NEXT: store i32 0, ptr [[TMP10]], align 4
+; CHECK-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT2]], ptr [[TMP1]], i64 [[TMP19:%.*]]
+; CHECK-NEXT: store i32 0, ptr [[TMP20]], align 4
+; CHECK-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT2]], ptr [[TMP1]], i64 [[TMP9]], i32 1
+; CHECK-NEXT: [[TMP25:%.*]] = load i32, ptr [[TMP24]], align 4
; CHECK-NEXT: ret i32 [[TMP25]]
;
bb:
- %tmp = getelementptr inbounds %struct1, %struct1* %dm, i64 0, i32 0
- %tmp1 = load %struct2*, %struct2** %tmp, align 8
- %tmp10 = getelementptr inbounds %struct2, %struct2* %tmp1, i64 %tmp9
- %tmp11 = getelementptr inbounds %struct2, %struct2* %tmp10, i64 0, i32 0
- store i32 0, i32* %tmp11, align 4
- %tmp20 = getelementptr inbounds %struct2, %struct2* %tmp1, i64 %tmp19
- %tmp21 = getelementptr inbounds %struct2, %struct2* %tmp20, i64 0, i32 0
- store i32 0, i32* %tmp21, align 4
- %tmp24 = getelementptr inbounds %struct2, %struct2* %tmp10, i64 0, i32 1
- %tmp25 = load i32, i32* %tmp24, align 4
+ %tmp1 = load ptr, ptr %dm, align 8
+ %tmp10 = getelementptr inbounds %struct2, ptr %tmp1, i64 %tmp9
+ store i32 0, ptr %tmp10, align 4
+ %tmp20 = getelementptr inbounds %struct2, ptr %tmp1, i64 %tmp19
+ store i32 0, ptr %tmp20, align 4
+ %tmp24 = getelementptr inbounds %struct2, ptr %tmp10, i64 0, i32 1
+ %tmp25 = load i32, ptr %tmp24, align 4
ret i32 %tmp25
}
; Check that instcombine doesn't insert GEPs before landingpad.
-define i32 @test3(%struct3* %dm, i1 %tmp4, i64 %tmp9, i64 %tmp19, i64 %tmp20, i64 %tmp21) personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+define i32 @test3(ptr %dm, i1 %tmp4, i64 %tmp9, i64 %tmp19, i64 %tmp20, i64 %tmp21) personality ptr @__gxx_personality_v0 {
; CHECK-LABEL: @test3(
; CHECK-NEXT: bb:
; CHECK-NEXT: br i1 [[TMP4:%.*]], label [[BB1:%.*]], label [[BB2:%.*]]
; CHECK: bb1:
-; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT3:%.*]], %struct3* [[DM:%.*]], i64 [[TMP19:%.*]], i32 1, i32 0, i32 0
-; CHECK-NEXT: store i32 0, i32* [[TMP11]], align 4
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT3:%.*]], ptr [[DM:%.*]], i64 [[TMP19:%.*]], i32 1
+; CHECK-NEXT: store i32 0, ptr [[TMP1]], align 4
; CHECK-NEXT: br label [[BB3:%.*]]
; CHECK: bb2:
-; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT3]], %struct3* [[DM]], i64 [[TMP20:%.*]], i32 1, i32 0, i32 1
-; CHECK-NEXT: store i32 0, i32* [[TMP12]], align 4
+; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT3]], ptr [[DM]], i64 [[TMP20:%.*]], i32 1, i32 0, i32 1
+; CHECK-NEXT: store i32 0, ptr [[TMP12]], align 4
; CHECK-NEXT: br label [[BB3]]
; CHECK: bb3:
; CHECK-NEXT: [[TMP0:%.*]] = phi i64 [ [[TMP19]], [[BB1]] ], [ [[TMP20]], [[BB2]] ]
@@ -98,45 +90,43 @@ define i32 @test3(%struct3* %dm, i1 %tmp4, i64 %tmp9, i64 %tmp19, i64 %tmp20, i6
; CHECK: bb4:
; CHECK-NEXT: ret i32 0
; CHECK: bb5:
-; CHECK-NEXT: [[TMP27:%.*]] = landingpad { i8*, i32 }
-; CHECK-NEXT: catch i8* bitcast (i8** @_ZTIi to i8*)
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT3]], %struct3* [[DM]], i64 [[TMP0]], i32 1
-; CHECK-NEXT: [[TMP35:%.*]] = getelementptr inbounds [[STRUCT4:%.*]], %struct4* [[TMP1]], i64 [[TMP21:%.*]], i32 1, i32 1
-; CHECK-NEXT: [[TMP25:%.*]] = load i32, i32* [[TMP35]], align 4
+; CHECK-NEXT: [[TMP27:%.*]] = landingpad { ptr, i32 }
+; CHECK-NEXT: catch ptr @_ZTIi
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT3]], ptr [[DM]], i64 [[TMP0]], i32 1
+; CHECK-NEXT: [[TMP35:%.*]] = getelementptr inbounds [[STRUCT4:%.*]], ptr [[TMP1]], i64 [[TMP21:%.*]], i32 1, i32 1
+; CHECK-NEXT: [[TMP25:%.*]] = load i32, ptr [[TMP35]], align 4
; CHECK-NEXT: ret i32 [[TMP25]]
;
bb:
- %tmp = getelementptr inbounds %struct3, %struct3* %dm, i64 0
br i1 %tmp4, label %bb1, label %bb2
bb1:
- %tmp1 = getelementptr inbounds %struct3, %struct3* %tmp, i64 %tmp19, i32 1
- %tmp11 = getelementptr inbounds %struct4, %struct4* %tmp1, i64 0, i32 0, i32 0
- store i32 0, i32* %tmp11, align 4
+ %tmp1 = getelementptr inbounds %struct3, ptr %dm, i64 %tmp19, i32 1
+ store i32 0, ptr %tmp1, align 4
br label %bb3
bb2:
- %tmp2 = getelementptr inbounds %struct3, %struct3* %tmp, i64 %tmp20, i32 1
- %tmp12 = getelementptr inbounds %struct4, %struct4* %tmp2, i64 0, i32 0, i32 1
- store i32 0, i32* %tmp12, align 4
+ %tmp2 = getelementptr inbounds %struct3, ptr %dm, i64 %tmp20, i32 1
+ %tmp12 = getelementptr inbounds %struct4, ptr %tmp2, i64 0, i32 0, i32 1
+ store i32 0, ptr %tmp12, align 4
br label %bb3
bb3:
- %phi = phi %struct4* [ %tmp1, %bb1 ], [ %tmp2, %bb2 ]
+ %phi = phi ptr [ %tmp1, %bb1 ], [ %tmp2, %bb2 ]
%tmp22 = invoke i32 @foo1(i32 11) to label %bb4 unwind label %bb5
bb4:
ret i32 0
bb5:
- %tmp27 = landingpad { i8*, i32 } catch i8* bitcast (i8** @_ZTIi to i8*)
- %tmp34 = getelementptr inbounds %struct4, %struct4* %phi, i64 %tmp21, i32 1
- %tmp35 = getelementptr inbounds %struct2, %struct2* %tmp34, i64 0, i32 1
- %tmp25 = load i32, i32* %tmp35, align 4
+ %tmp27 = landingpad { ptr, i32 } catch ptr @_ZTIi
+ %tmp34 = getelementptr inbounds %struct4, ptr %phi, i64 %tmp21, i32 1
+ %tmp35 = getelementptr inbounds %struct2, ptr %tmp34, i64 0, i32 1
+ %tmp25 = load i32, ptr %tmp35, align 4
ret i32 %tmp25
}
-@_ZTIi = external constant i8*
+@_ZTIi = external constant ptr
declare i32 @__gxx_personality_v0(...)
declare i32 @foo1(i32)
@@ -144,7 +134,7 @@ declare i32 @foo1(i32)
; Check that instcombine doesn't fold GEPs into themselves through a loop
; back-edge.
-define i8* @test4(i32 %value, i8* %buffer) {
+define ptr @test4(i32 %value, ptr %buffer) {
; CHECK-LABEL: @test4(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i32 [[VALUE:%.*]], 127
@@ -152,21 +142,21 @@ define i8* @test4(i32 %value, i8* %buffer) {
; CHECK: loop.header:
; CHECK-NEXT: br label [[LOOP_BODY:%.*]]
; CHECK: loop.body:
-; CHECK-NEXT: [[BUFFER_PN:%.*]] = phi i8* [ [[BUFFER:%.*]], [[LOOP_HEADER]] ], [ [[LOOPPTR:%.*]], [[LOOP_BODY]] ]
+; CHECK-NEXT: [[BUFFER_PN:%.*]] = phi ptr [ [[BUFFER:%.*]], [[LOOP_HEADER]] ], [ [[LOOPPTR:%.*]], [[LOOP_BODY]] ]
; CHECK-NEXT: [[NEWVAL:%.*]] = phi i32 [ [[VALUE]], [[LOOP_HEADER]] ], [ [[SHR:%.*]], [[LOOP_BODY]] ]
-; CHECK-NEXT: [[LOOPPTR]] = getelementptr inbounds i8, i8* [[BUFFER_PN]], i64 1
+; CHECK-NEXT: [[LOOPPTR]] = getelementptr inbounds i8, ptr [[BUFFER_PN]], i64 1
; CHECK-NEXT: [[SHR]] = lshr i32 [[NEWVAL]], 7
; CHECK-NEXT: [[CMP2:%.*]] = icmp ugt i32 [[NEWVAL]], 16383
; CHECK-NEXT: br i1 [[CMP2]], label [[LOOP_BODY]], label [[LOOP_EXIT:%.*]]
; CHECK: loop.exit:
; CHECK-NEXT: br label [[EXIT]]
; CHECK: exit:
-; CHECK-NEXT: [[TMP0:%.*]] = phi i8* [ [[LOOPPTR]], [[LOOP_EXIT]] ], [ [[BUFFER]], [[ENTRY:%.*]] ]
-; CHECK-NEXT: [[INCPTR3:%.*]] = getelementptr inbounds i8, i8* [[TMP0]], i64 2
-; CHECK-NEXT: ret i8* [[INCPTR3]]
+; CHECK-NEXT: [[TMP0:%.*]] = phi ptr [ [[LOOPPTR]], [[LOOP_EXIT]] ], [ [[BUFFER]], [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[INCPTR3:%.*]] = getelementptr inbounds i8, ptr [[TMP0]], i64 2
+; CHECK-NEXT: ret ptr [[INCPTR3]]
;
entry:
- %incptr = getelementptr inbounds i8, i8* %buffer, i64 1
+ %incptr = getelementptr inbounds i8, ptr %buffer, i64 1
%cmp = icmp ugt i32 %value, 127
br i1 %cmp, label %loop.header, label %exit
@@ -174,55 +164,55 @@ loop.header:
br label %loop.body
loop.body:
- %loopptr = phi i8* [ %incptr, %loop.header ], [ %incptr2, %loop.body ]
+ %loopptr = phi ptr [ %incptr, %loop.header ], [ %incptr2, %loop.body ]
%newval = phi i32 [ %value, %loop.header ], [ %shr, %loop.body ]
%shr = lshr i32 %newval, 7
- %incptr2 = getelementptr inbounds i8, i8* %loopptr, i64 1
+ %incptr2 = getelementptr inbounds i8, ptr %loopptr, i64 1
%cmp2 = icmp ugt i32 %shr, 127
br i1 %cmp2, label %loop.body, label %loop.exit
loop.exit:
- %exitptr = phi i8* [ %incptr2, %loop.body ]
+ %exitptr = phi ptr [ %incptr2, %loop.body ]
br label %exit
exit:
- %ptr2 = phi i8* [ %exitptr, %loop.exit ], [ %incptr, %entry ]
- %incptr3 = getelementptr inbounds i8, i8* %ptr2, i64 1
- ret i8* %incptr3
+ %ptr2 = phi ptr [ %exitptr, %loop.exit ], [ %incptr, %entry ]
+ %incptr3 = getelementptr inbounds i8, ptr %ptr2, i64 1
+ ret ptr %incptr3
}
@.str.4 = external unnamed_addr constant [100 x i8], align 1
; Instcombine shouldn't add new PHI nodes while folding GEPs if that will leave
; old PHI nodes behind as this is not clearly beneficial.
-define void @test5(i16 *%idx, i8 **%in) #0 {
+define void @test5(ptr %idx, ptr %in) #0 {
; CHECK-LABEL: @test5(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i8*, i8** [[IN:%.*]], align 8
-; CHECK-NEXT: [[INCDEC_PTR:%.*]] = getelementptr inbounds i8, i8* [[TMP0]], i64 1
-; CHECK-NEXT: [[TMP1:%.*]] = load i8, i8* [[INCDEC_PTR]], align 1
+; CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[IN:%.*]], align 8
+; CHECK-NEXT: [[INCDEC_PTR:%.*]] = getelementptr inbounds i8, ptr [[TMP0]], i64 1
+; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr [[INCDEC_PTR]], align 1
; CHECK-NEXT: [[CMP23:%.*]] = icmp eq i8 [[TMP1]], 54
; CHECK-NEXT: br i1 [[CMP23]], label [[WHILE_COND:%.*]], label [[IF_THEN_25:%.*]]
; CHECK: if.then.25:
-; CHECK-NEXT: call void @g(i8* nonnull getelementptr inbounds ([100 x i8], [100 x i8]* @.str.4, i64 0, i64 0))
+; CHECK-NEXT: call void @g(ptr nonnull @.str.4)
; CHECK-NEXT: br label [[WHILE_COND]]
; CHECK: while.cond:
-; CHECK-NEXT: [[PTR:%.*]] = phi i8* [ [[INCDEC_PTR]], [[ENTRY:%.*]] ], [ [[INCDEC_PTR32:%.*]], [[WHILE_BODY:%.*]] ], [ [[INCDEC_PTR]], [[IF_THEN_25]] ]
-; CHECK-NEXT: [[TMP2:%.*]] = load i8, i8* [[PTR]], align 1
+; CHECK-NEXT: [[PTR:%.*]] = phi ptr [ [[INCDEC_PTR]], [[ENTRY:%.*]] ], [ [[INCDEC_PTR32:%.*]], [[WHILE_BODY:%.*]] ], [ [[INCDEC_PTR]], [[IF_THEN_25]] ]
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr [[PTR]], align 1
; CHECK-NEXT: [[AND:%.*]] = and i8 [[TMP2]], 64
; CHECK-NEXT: [[LNOT:%.*]] = icmp eq i8 [[AND]], 0
; CHECK-NEXT: br i1 [[LNOT]], label [[WHILE_BODY]], label [[WHILE_COND_33:%.*]]
; CHECK: while.body:
-; CHECK-NEXT: [[INCDEC_PTR32]] = getelementptr inbounds i8, i8* [[PTR]], i64 1
+; CHECK-NEXT: [[INCDEC_PTR32]] = getelementptr inbounds i8, ptr [[PTR]], i64 1
; CHECK-NEXT: br label [[WHILE_COND]]
; CHECK: while.cond.33:
-; CHECK-NEXT: [[INCDEC_PTR34:%.*]] = getelementptr inbounds i8, i8* [[PTR]], i64 1
+; CHECK-NEXT: [[INCDEC_PTR34:%.*]] = getelementptr inbounds i8, ptr [[PTR]], i64 1
; CHECK-NEXT: br label [[WHILE_COND_57:%.*]]
; CHECK: while.cond.57:
-; CHECK-NEXT: [[TMP3:%.*]] = load i8, i8* [[INCDEC_PTR34]], align 1
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr [[INCDEC_PTR34]], align 1
; CHECK-NEXT: [[TMP4:%.*]] = zext i8 [[TMP3]] to i64
-; CHECK-NEXT: [[ARRAYIDX61:%.*]] = getelementptr inbounds i16, i16* [[IDX:%.*]], i64 [[TMP4]]
-; CHECK-NEXT: [[TMP5:%.*]] = load i16, i16* [[ARRAYIDX61]], align 2
+; CHECK-NEXT: [[ARRAYIDX61:%.*]] = getelementptr inbounds i16, ptr [[IDX:%.*]], i64 [[TMP4]]
+; CHECK-NEXT: [[TMP5:%.*]] = load i16, ptr [[ARRAYIDX61]], align 2
; CHECK-NEXT: [[AND63:%.*]] = and i16 [[TMP5]], 2048
; CHECK-NEXT: [[TOBOOL64:%.*]] = icmp eq i16 [[AND63]], 0
; CHECK-NEXT: br i1 [[TOBOOL64]], label [[WHILE_COND_73:%.*]], label [[WHILE_COND_57]]
@@ -230,36 +220,36 @@ define void @test5(i16 *%idx, i8 **%in) #0 {
; CHECK-NEXT: br label [[WHILE_COND_73]]
;
entry:
- %0 = load i8*, i8** %in
- %incdec.ptr = getelementptr inbounds i8, i8* %0, i32 1
- %1 = load i8, i8* %incdec.ptr, align 1
+ %0 = load ptr, ptr %in
+ %incdec.ptr = getelementptr inbounds i8, ptr %0, i32 1
+ %1 = load i8, ptr %incdec.ptr, align 1
%cmp23 = icmp eq i8 %1, 54
br i1 %cmp23, label %while.cond, label %if.then.25
if.then.25:
- call void @g(i8* getelementptr inbounds ([100 x i8], [100 x i8]* @.str.4, i32 0, i32 0))
+ call void @g(ptr @.str.4)
br label %while.cond
while.cond:
- %Ptr = phi i8* [ %incdec.ptr, %entry ], [ %incdec.ptr32, %while.body], [%incdec.ptr, %if.then.25 ]
- %2 = load i8, i8* %Ptr
+ %Ptr = phi ptr [ %incdec.ptr, %entry ], [ %incdec.ptr32, %while.body], [%incdec.ptr, %if.then.25 ]
+ %2 = load i8, ptr %Ptr
%and = and i8 %2, 64
%lnot = icmp eq i8 %and, 0
br i1 %lnot, label %while.body, label %while.cond.33
while.body:
- %incdec.ptr32 = getelementptr inbounds i8, i8* %Ptr, i32 1
+ %incdec.ptr32 = getelementptr inbounds i8, ptr %Ptr, i32 1
br label %while.cond
while.cond.33:
- %incdec.ptr34 = getelementptr inbounds i8, i8* %Ptr, i32 1
+ %incdec.ptr34 = getelementptr inbounds i8, ptr %Ptr, i32 1
br label %while.cond.57
while.cond.57:
- %3 = load i8, i8* %incdec.ptr34, align 1
+ %3 = load i8, ptr %incdec.ptr34, align 1
%conv59 = zext i8 %3 to i32
- %arrayidx61 = getelementptr inbounds i16, i16* %idx, i32 %conv59
- %4 = load i16, i16* %arrayidx61, align 2
+ %arrayidx61 = getelementptr inbounds i16, ptr %idx, i32 %conv59
+ %4 = load i16, ptr %arrayidx61, align 2
%and63 = and i16 %4, 2048
%tobool64 = icmp eq i16 %and63, 0
br i1 %tobool64, label %while.cond.73, label %while.cond.57
@@ -268,4 +258,4 @@ while.cond.73:
br label %while.cond.73
}
-declare void @g(i8*)
+declare void @g(ptr)
diff --git a/llvm/test/Transforms/InstCombine/memchr-8.ll b/llvm/test/Transforms/InstCombine/memchr-8.ll
index 7f0228d95827..aefd518fd635 100644
--- a/llvm/test/Transforms/InstCombine/memchr-8.ll
+++ b/llvm/test/Transforms/InstCombine/memchr-8.ll
@@ -6,58 +6,58 @@
; at 64K for the largest supported zeroinitializer. If the limit changes
; the test might need to be adjusted.
-declare i8* @memrchr(i8*, i32, i64)
+declare ptr @memrchr(ptr, i32, i64)
@a = constant <{ i8, [4294967295 x i8] }> <{ i8 1, [4294967295 x i8] zeroinitializer }>
; Verify reading an initializer INT32_MAX + 1 bytes large (starting at
; offset 2147483647 into a which is UINT32_MAX bytes in size).
-define i8* @call_a_pi32max_p1() {
+define ptr @call_a_pi32max_p1() {
; CHECK-LABEL: @call_a_pi32max_p1(
-; CHECK-NEXT: [[CHR:%.*]] = tail call i8* @memrchr(i8* noundef nonnull dereferenceable(2147483647) getelementptr inbounds (<{ i8, [4294967295 x i8] }>, <{ i8, [4294967295 x i8] }>* @a, i64 0, i32 1, i64 2147483647), i32 0, i64 2147483647)
-; CHECK-NEXT: ret i8* [[CHR]]
+; CHECK-NEXT: [[CHR:%.*]] = tail call ptr @memrchr(ptr noundef nonnull dereferenceable(2147483647) getelementptr inbounds (<{ i8, [4294967295 x i8] }>, ptr @a, i64 0, i32 1, i64 2147483647), i32 0, i64 2147483647)
+; CHECK-NEXT: ret ptr [[CHR]]
;
%ptr = getelementptr <{ i8, [4294967295 x i8] }>, <{ i8, [4294967295 x i8] }>* @a, i32 0, i32 1, i32 2147483647
- %chr = tail call i8* @memrchr(i8* %ptr, i32 0, i64 2147483647)
- ret i8* %chr
+ %chr = tail call ptr @memrchr(ptr %ptr, i32 0, i64 2147483647)
+ ret ptr %chr
}
; Verify reading an initializer INT32_MAX bytes large (starting at offset
; 2147483648 into a which is UINT32_MAX bytes in size).
-define i8* @call_a_pi32max() {
+define ptr @call_a_pi32max() {
; CHECK-LABEL: @call_a_pi32max(
-; CHECK-NEXT: [[CHR:%.*]] = tail call i8* @memrchr(i8* noundef nonnull dereferenceable(2147483647) getelementptr inbounds (<{ i8, [4294967295 x i8] }>, <{ i8, [4294967295 x i8] }>* @a, i64 0, i32 1, i64 2147483648), i32 0, i64 2147483647)
-; CHECK-NEXT: ret i8* [[CHR]]
+; CHECK-NEXT: [[CHR:%.*]] = tail call ptr @memrchr(ptr noundef nonnull dereferenceable(2147483647) getelementptr inbounds (<{ i8, [4294967295 x i8] }>, ptr @a, i64 0, i32 1, i64 2147483648), i32 0, i64 2147483647)
+; CHECK-NEXT: ret ptr [[CHR]]
;
%ptr = getelementptr <{ i8, [4294967295 x i8] }>, <{ i8, [4294967295 x i8] }>* @a, i32 0, i32 1, i64 2147483648
- %chr = tail call i8* @memrchr(i8* %ptr, i32 0, i64 2147483647)
- ret i8* %chr
+ %chr = tail call ptr @memrchr(ptr %ptr, i32 0, i64 2147483647)
+ ret ptr %chr
}
; Verify reading an initializer UINT32_MAX bytes large (starting at offset
; 1 into a).
-define i8* @call_a_pui32max() {
+define ptr @call_a_pui32max() {
; CHECK-LABEL: @call_a_pui32max(
-; CHECK-NEXT: [[CHR:%.*]] = tail call i8* @memrchr(i8* noundef nonnull dereferenceable(4294967295) getelementptr inbounds (<{ i8, [4294967295 x i8] }>, <{ i8, [4294967295 x i8] }>* @a, i64 0, i32 1, i64 0), i32 0, i64 4294967295)
-; CHECK-NEXT: ret i8* [[CHR]]
+; CHECK-NEXT: [[CHR:%.*]] = tail call ptr @memrchr(ptr noundef nonnull dereferenceable(4294967295) getelementptr inbounds (<{ i8, [4294967295 x i8] }>, ptr @a, i64 0, i32 1, i64 0), i32 0, i64 4294967295)
+; CHECK-NEXT: ret ptr [[CHR]]
;
%ptr = getelementptr <{ i8, [4294967295 x i8] }>, <{ i8, [4294967295 x i8] }>* @a, i32 0, i32 1, i32 0
- %chr = tail call i8* @memrchr(i8* %ptr, i32 0, i64 4294967295)
- ret i8* %chr
+ %chr = tail call ptr @memrchr(ptr %ptr, i32 0, i64 4294967295)
+ ret ptr %chr
}
; Verify reading an initializer UINT32_MAX + 1 bytes large (all of a).
-define i8* @call_a_puimax_p1() {
+define ptr @call_a_puimax_p1() {
; CHECK-LABEL: @call_a_puimax_p1(
-; CHECK-NEXT: [[CHR:%.*]] = tail call i8* @memrchr(i8* noundef nonnull dereferenceable(4294967296) getelementptr inbounds (<{ i8, [4294967295 x i8] }>, <{ i8, [4294967295 x i8] }>* @a, i64 0, i32 0), i32 0, i64 4294967296)
-; CHECK-NEXT: ret i8* [[CHR]]
+; CHECK-NEXT: [[CHR:%.*]] = tail call ptr @memrchr(ptr noundef nonnull dereferenceable(4294967296) @a, i32 0, i64 4294967296)
+; CHECK-NEXT: ret ptr [[CHR]]
;
%ptr = getelementptr <{ i8, [4294967295 x i8] }>, <{ i8, [4294967295 x i8] }>* @a, i32 0, i32 0
- %chr = tail call i8* @memrchr(i8* %ptr, i32 0, i64 4294967296)
- ret i8* %chr
+ %chr = tail call ptr @memrchr(ptr %ptr, i32 0, i64 4294967296)
+ ret ptr %chr
}
diff --git a/llvm/test/Transforms/InstCombine/memcpy-addrspace.ll b/llvm/test/Transforms/InstCombine/memcpy-addrspace.ll
index c0f5e6ff5c6e..d1543696bfc0 100644
--- a/llvm/test/Transforms/InstCombine/memcpy-addrspace.ll
+++ b/llvm/test/Transforms/InstCombine/memcpy-addrspace.ll
@@ -3,152 +3,141 @@
@test.data = private unnamed_addr addrspace(2) constant [8 x i32] [i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7], align 4
-define void @test_load(i32 addrspace(1)* %out, i64 %x) {
+define void @test_load(ptr addrspace(1) %out, i64 %x) {
; CHECK-LABEL: @test_load(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr [8 x i32], [8 x i32] addrspace(2)* @test.data, i64 0, i64 [[X:%.*]]
-; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32 addrspace(2)* [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, i32 addrspace(1)* [[OUT:%.*]], i64 [[X]]
-; CHECK-NEXT: store i32 [[TMP0]], i32 addrspace(1)* [[ARRAYIDX1]], align 4
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr [8 x i32], ptr addrspace(2) @test.data, i64 0, i64 [[X:%.*]]
+; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr addrspace(2) [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, ptr addrspace(1) [[OUT:%.*]], i64 [[X]]
+; CHECK-NEXT: store i32 [[TMP0]], ptr addrspace(1) [[ARRAYIDX1]], align 4
; CHECK-NEXT: ret void
;
entry:
%data = alloca [8 x i32], align 4
- %0 = bitcast [8 x i32]* %data to i8*
- call void @llvm.memcpy.p0i8.p2i8.i64(i8* align 4 %0, i8 addrspace(2)* align 4 bitcast ([8 x i32] addrspace(2)* @test.data to i8 addrspace(2)*), i64 32, i1 false)
- %arrayidx = getelementptr inbounds [8 x i32], [8 x i32]* %data, i64 0, i64 %x
- %1 = load i32, i32* %arrayidx, align 4
- %arrayidx1 = getelementptr inbounds i32, i32 addrspace(1)* %out, i64 %x
- store i32 %1, i32 addrspace(1)* %arrayidx1, align 4
+ call void @llvm.memcpy.p0.p2.i64(ptr align 4 %data, ptr addrspace(2) align 4 @test.data, i64 32, i1 false)
+ %arrayidx = getelementptr inbounds [8 x i32], ptr %data, i64 0, i64 %x
+ %0 = load i32, ptr %arrayidx, align 4
+ %arrayidx1 = getelementptr inbounds i32, ptr addrspace(1) %out, i64 %x
+ store i32 %0, ptr addrspace(1) %arrayidx1, align 4
ret void
}
-define void @test_load_bitcast_chain(i32 addrspace(1)* %out, i64 %x) {
+define void @test_load_bitcast_chain(ptr addrspace(1) %out, i64 %x) {
; CHECK-LABEL: @test_load_bitcast_chain(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr [8 x i32], [8 x i32] addrspace(2)* @test.data, i64 0, i64 [[X:%.*]]
-; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32 addrspace(2)* [[ARRAYIDX2]], align 4
-; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, i32 addrspace(1)* [[OUT:%.*]], i64 [[X]]
-; CHECK-NEXT: store i32 [[TMP0]], i32 addrspace(1)* [[ARRAYIDX1]], align 4
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr i32, ptr addrspace(2) @test.data, i64 [[X:%.*]]
+; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr addrspace(2) [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, ptr addrspace(1) [[OUT:%.*]], i64 [[X]]
+; CHECK-NEXT: store i32 [[TMP0]], ptr addrspace(1) [[ARRAYIDX1]], align 4
; CHECK-NEXT: ret void
;
entry:
%data = alloca [8 x i32], align 4
- %0 = bitcast [8 x i32]* %data to i8*
- call void @llvm.memcpy.p0i8.p2i8.i64(i8* align 4 %0, i8 addrspace(2)* align 4 bitcast ([8 x i32] addrspace(2)* @test.data to i8 addrspace(2)*), i64 32, i1 false)
- %1 = bitcast i8* %0 to i32*
- %arrayidx = getelementptr inbounds i32, i32* %1, i64 %x
- %2 = load i32, i32* %arrayidx, align 4
- %arrayidx1 = getelementptr inbounds i32, i32 addrspace(1)* %out, i64 %x
- store i32 %2, i32 addrspace(1)* %arrayidx1, align 4
+ call void @llvm.memcpy.p0.p2.i64(ptr align 4 %data, ptr addrspace(2) align 4 @test.data, i64 32, i1 false)
+ %arrayidx = getelementptr inbounds i32, ptr %data, i64 %x
+ %0 = load i32, ptr %arrayidx, align 4
+ %arrayidx1 = getelementptr inbounds i32, ptr addrspace(1) %out, i64 %x
+ store i32 %0, ptr addrspace(1) %arrayidx1, align 4
ret void
}
-define void @test_call(i32 addrspace(1)* %out, i64 %x) {
+define void @test_call(ptr addrspace(1) %out, i64 %x) {
; CHECK-LABEL: @test_call(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[DATA:%.*]] = alloca [8 x i32], align 4
-; CHECK-NEXT: [[TMP0:%.*]] = bitcast [8 x i32]* [[DATA]] to i8*
-; CHECK-NEXT: call void @llvm.memcpy.p0i8.p2i8.i64(i8* noundef nonnull align 4 dereferenceable(32) [[TMP0]], i8 addrspace(2)* noundef align 4 dereferenceable(32) bitcast ([8 x i32] addrspace(2)* @test.data to i8 addrspace(2)*), i64 32, i1 false)
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [8 x i32], [8 x i32]* [[DATA]], i64 0, i64 [[X:%.*]]
-; CHECK-NEXT: [[TMP1:%.*]] = call i32 @foo(i32* nonnull [[ARRAYIDX]])
-; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, i32 addrspace(1)* [[OUT:%.*]], i64 [[X]]
-; CHECK-NEXT: store i32 [[TMP1]], i32 addrspace(1)* [[ARRAYIDX1]], align 4
+; CHECK-NEXT: call void @llvm.memcpy.p0.p2.i64(ptr noundef nonnull align 4 dereferenceable(32) [[DATA]], ptr addrspace(2) noundef align 4 dereferenceable(32) @test.data, i64 32, i1 false)
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [8 x i32], ptr [[DATA]], i64 0, i64 [[X:%.*]]
+; CHECK-NEXT: [[TMP0:%.*]] = call i32 @foo(ptr nonnull [[ARRAYIDX]])
+; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, ptr addrspace(1) [[OUT:%.*]], i64 [[X]]
+; CHECK-NEXT: store i32 [[TMP0]], ptr addrspace(1) [[ARRAYIDX1]], align 4
; CHECK-NEXT: ret void
;
entry:
%data = alloca [8 x i32], align 4
- %0 = bitcast [8 x i32]* %data to i8*
- call void @llvm.memcpy.p0i8.p2i8.i64(i8* align 4 %0, i8 addrspace(2)* align 4 bitcast ([8 x i32] addrspace(2)* @test.data to i8 addrspace(2)*), i64 32, i1 false)
- %arrayidx = getelementptr inbounds [8 x i32], [8 x i32]* %data, i64 0, i64 %x
- %1 = call i32 @foo(i32* %arrayidx)
- %arrayidx1 = getelementptr inbounds i32, i32 addrspace(1)* %out, i64 %x
- store i32 %1, i32 addrspace(1)* %arrayidx1, align 4
+ call void @llvm.memcpy.p0.p2.i64(ptr align 4 %data, ptr addrspace(2) align 4 @test.data, i64 32, i1 false)
+ %arrayidx = getelementptr inbounds [8 x i32], ptr %data, i64 0, i64 %x
+ %0 = call i32 @foo(ptr %arrayidx)
+ %arrayidx1 = getelementptr inbounds i32, ptr addrspace(1) %out, i64 %x
+ store i32 %0, ptr addrspace(1) %arrayidx1, align 4
ret void
}
-define void @test_call_no_null_opt(i32 addrspace(1)* %out, i64 %x) #0 {
+define void @test_call_no_null_opt(ptr addrspace(1) %out, i64 %x) #0 {
; CHECK-LABEL: @test_call_no_null_opt(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[DATA:%.*]] = alloca [8 x i32], align 4
-; CHECK-NEXT: [[TMP0:%.*]] = bitcast [8 x i32]* [[DATA]] to i8*
-; CHECK-NEXT: call void @llvm.memcpy.p0i8.p2i8.i64(i8* noundef nonnull align 4 dereferenceable(32) [[TMP0]], i8 addrspace(2)* noundef align 4 dereferenceable(32) bitcast ([8 x i32] addrspace(2)* @test.data to i8 addrspace(2)*), i64 32, i1 false)
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [8 x i32], [8 x i32]* [[DATA]], i64 0, i64 [[X:%.*]]
-; CHECK-NEXT: [[TMP1:%.*]] = call i32 @foo(i32* [[ARRAYIDX]])
-; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, i32 addrspace(1)* [[OUT:%.*]], i64 [[X]]
-; CHECK-NEXT: store i32 [[TMP1]], i32 addrspace(1)* [[ARRAYIDX1]], align 4
+; CHECK-NEXT: call void @llvm.memcpy.p0.p2.i64(ptr noundef nonnull align 4 dereferenceable(32) [[DATA]], ptr addrspace(2) noundef align 4 dereferenceable(32) @test.data, i64 32, i1 false)
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [8 x i32], ptr [[DATA]], i64 0, i64 [[X:%.*]]
+; CHECK-NEXT: [[TMP0:%.*]] = call i32 @foo(ptr [[ARRAYIDX]])
+; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, ptr addrspace(1) [[OUT:%.*]], i64 [[X]]
+; CHECK-NEXT: store i32 [[TMP0]], ptr addrspace(1) [[ARRAYIDX1]], align 4
; CHECK-NEXT: ret void
;
entry:
%data = alloca [8 x i32], align 4
- %0 = bitcast [8 x i32]* %data to i8*
- call void @llvm.memcpy.p0i8.p2i8.i64(i8* align 4 %0, i8 addrspace(2)* align 4 bitcast ([8 x i32] addrspace(2)* @test.data to i8 addrspace(2)*), i64 32, i1 false)
- %arrayidx = getelementptr inbounds [8 x i32], [8 x i32]* %data, i64 0, i64 %x
- %1 = call i32 @foo(i32* %arrayidx)
- %arrayidx1 = getelementptr inbounds i32, i32 addrspace(1)* %out, i64 %x
- store i32 %1, i32 addrspace(1)* %arrayidx1, align 4
+ call void @llvm.memcpy.p0.p2.i64(ptr align 4 %data, ptr addrspace(2) align 4 @test.data, i64 32, i1 false)
+ %arrayidx = getelementptr inbounds [8 x i32], ptr %data, i64 0, i64 %x
+ %0 = call i32 @foo(ptr %arrayidx)
+ %arrayidx1 = getelementptr inbounds i32, ptr addrspace(1) %out, i64 %x
+ store i32 %0, ptr addrspace(1) %arrayidx1, align 4
ret void
}
-define void @test_load_and_call(i32 addrspace(1)* %out, i64 %x, i64 %y) {
+define void @test_load_and_call(ptr addrspace(1) %out, i64 %x, i64 %y) {
; CHECK-LABEL: @test_load_and_call(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[DATA:%.*]] = alloca [8 x i32], align 4
-; CHECK-NEXT: [[TMP0:%.*]] = bitcast [8 x i32]* [[DATA]] to i8*
-; CHECK-NEXT: call void @llvm.memcpy.p0i8.p2i8.i64(i8* noundef nonnull align 4 dereferenceable(32) [[TMP0]], i8 addrspace(2)* noundef align 4 dereferenceable(32) bitcast ([8 x i32] addrspace(2)* @test.data to i8 addrspace(2)*), i64 32, i1 false)
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [8 x i32], [8 x i32]* [[DATA]], i64 0, i64 [[X:%.*]]
-; CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, i32 addrspace(1)* [[OUT:%.*]], i64 [[X]]
-; CHECK-NEXT: store i32 [[TMP1]], i32 addrspace(1)* [[ARRAYIDX1]], align 4
-; CHECK-NEXT: [[TMP2:%.*]] = call i32 @foo(i32* nonnull [[ARRAYIDX]])
-; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, i32 addrspace(1)* [[OUT]], i64 [[Y:%.*]]
-; CHECK-NEXT: store i32 [[TMP2]], i32 addrspace(1)* [[ARRAYIDX2]], align 4
+; CHECK-NEXT: call void @llvm.memcpy.p0.p2.i64(ptr noundef nonnull align 4 dereferenceable(32) [[DATA]], ptr addrspace(2) noundef align 4 dereferenceable(32) @test.data, i64 32, i1 false)
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [8 x i32], ptr [[DATA]], i64 0, i64 [[X:%.*]]
+; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, ptr addrspace(1) [[OUT:%.*]], i64 [[X]]
+; CHECK-NEXT: store i32 [[TMP0]], ptr addrspace(1) [[ARRAYIDX1]], align 4
+; CHECK-NEXT: [[TMP1:%.*]] = call i32 @foo(ptr nonnull [[ARRAYIDX]])
+; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr addrspace(1) [[OUT]], i64 [[Y:%.*]]
+; CHECK-NEXT: store i32 [[TMP1]], ptr addrspace(1) [[ARRAYIDX2]], align 4
; CHECK-NEXT: ret void
;
entry:
%data = alloca [8 x i32], align 4
- %0 = bitcast [8 x i32]* %data to i8*
- call void @llvm.memcpy.p0i8.p2i8.i64(i8* align 4 %0, i8 addrspace(2)* align 4 bitcast ([8 x i32] addrspace(2)* @test.data to i8 addrspace(2)*), i64 32, i1 false)
- %arrayidx = getelementptr inbounds [8 x i32], [8 x i32]* %data, i64 0, i64 %x
- %1 = load i32, i32* %arrayidx, align 4
- %arrayidx1 = getelementptr inbounds i32, i32 addrspace(1)* %out, i64 %x
- store i32 %1, i32 addrspace(1)* %arrayidx1, align 4
- %2 = call i32 @foo(i32* %arrayidx)
- %arrayidx2 = getelementptr inbounds i32, i32 addrspace(1)* %out, i64 %y
- store i32 %2, i32 addrspace(1)* %arrayidx2, align 4
+ call void @llvm.memcpy.p0.p2.i64(ptr align 4 %data, ptr addrspace(2) align 4 @test.data, i64 32, i1 false)
+ %arrayidx = getelementptr inbounds [8 x i32], ptr %data, i64 0, i64 %x
+ %0 = load i32, ptr %arrayidx, align 4
+ %arrayidx1 = getelementptr inbounds i32, ptr addrspace(1) %out, i64 %x
+ store i32 %0, ptr addrspace(1) %arrayidx1, align 4
+ %1 = call i32 @foo(ptr %arrayidx)
+ %arrayidx2 = getelementptr inbounds i32, ptr addrspace(1) %out, i64 %y
+ store i32 %1, ptr addrspace(1) %arrayidx2, align 4
ret void
}
-define void @test_load_and_call_no_null_opt(i32 addrspace(1)* %out, i64 %x, i64 %y) #0 {
+define void @test_load_and_call_no_null_opt(ptr addrspace(1) %out, i64 %x, i64 %y) #0 {
; CHECK-LABEL: @test_load_and_call_no_null_opt(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[DATA:%.*]] = alloca [8 x i32], align 4
-; CHECK-NEXT: [[TMP0:%.*]] = bitcast [8 x i32]* [[DATA]] to i8*
-; CHECK-NEXT: call void @llvm.memcpy.p0i8.p2i8.i64(i8* noundef nonnull align 4 dereferenceable(32) [[TMP0]], i8 addrspace(2)* noundef align 4 dereferenceable(32) bitcast ([8 x i32] addrspace(2)* @test.data to i8 addrspace(2)*), i64 32, i1 false)
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [8 x i32], [8 x i32]* [[DATA]], i64 0, i64 [[X:%.*]]
-; CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, i32 addrspace(1)* [[OUT:%.*]], i64 [[X]]
-; CHECK-NEXT: store i32 [[TMP1]], i32 addrspace(1)* [[ARRAYIDX1]], align 4
-; CHECK-NEXT: [[TMP2:%.*]] = call i32 @foo(i32* [[ARRAYIDX]])
-; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, i32 addrspace(1)* [[OUT]], i64 [[Y:%.*]]
-; CHECK-NEXT: store i32 [[TMP2]], i32 addrspace(1)* [[ARRAYIDX2]], align 4
+; CHECK-NEXT: call void @llvm.memcpy.p0.p2.i64(ptr noundef nonnull align 4 dereferenceable(32) [[DATA]], ptr addrspace(2) noundef align 4 dereferenceable(32) @test.data, i64 32, i1 false)
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [8 x i32], ptr [[DATA]], i64 0, i64 [[X:%.*]]
+; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, ptr addrspace(1) [[OUT:%.*]], i64 [[X]]
+; CHECK-NEXT: store i32 [[TMP0]], ptr addrspace(1) [[ARRAYIDX1]], align 4
+; CHECK-NEXT: [[TMP1:%.*]] = call i32 @foo(ptr [[ARRAYIDX]])
+; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr addrspace(1) [[OUT]], i64 [[Y:%.*]]
+; CHECK-NEXT: store i32 [[TMP1]], ptr addrspace(1) [[ARRAYIDX2]], align 4
; CHECK-NEXT: ret void
;
entry:
%data = alloca [8 x i32], align 4
- %0 = bitcast [8 x i32]* %data to i8*
- call void @llvm.memcpy.p0i8.p2i8.i64(i8* align 4 %0, i8 addrspace(2)* align 4 bitcast ([8 x i32] addrspace(2)* @test.data to i8 addrspace(2)*), i64 32, i1 false)
- %arrayidx = getelementptr inbounds [8 x i32], [8 x i32]* %data, i64 0, i64 %x
- %1 = load i32, i32* %arrayidx, align 4
- %arrayidx1 = getelementptr inbounds i32, i32 addrspace(1)* %out, i64 %x
- store i32 %1, i32 addrspace(1)* %arrayidx1, align 4
- %2 = call i32 @foo(i32* %arrayidx)
- %arrayidx2 = getelementptr inbounds i32, i32 addrspace(1)* %out, i64 %y
- store i32 %2, i32 addrspace(1)* %arrayidx2, align 4
+ call void @llvm.memcpy.p0.p2.i64(ptr align 4 %data, ptr addrspace(2) align 4 @test.data, i64 32, i1 false)
+ %arrayidx = getelementptr inbounds [8 x i32], ptr %data, i64 0, i64 %x
+ %0 = load i32, ptr %arrayidx, align 4
+ %arrayidx1 = getelementptr inbounds i32, ptr addrspace(1) %out, i64 %x
+ store i32 %0, ptr addrspace(1) %arrayidx1, align 4
+ %1 = call i32 @foo(ptr %arrayidx)
+ %arrayidx2 = getelementptr inbounds i32, ptr addrspace(1) %out, i64 %y
+ store i32 %1, ptr addrspace(1) %arrayidx2, align 4
ret void
}
-declare void @llvm.memcpy.p0i8.p2i8.i64(i8* nocapture writeonly, i8 addrspace(2)* nocapture readonly, i64, i1)
-declare i32 @foo(i32* %x)
+declare void @llvm.memcpy.p0.p2.i64(ptr nocapture writeonly, ptr addrspace(2) nocapture readonly, i64, i1)
+declare i32 @foo(ptr %x)
attributes #0 = { null_pointer_is_valid }
diff --git a/llvm/test/Transforms/InstCombine/memrchr-4.ll b/llvm/test/Transforms/InstCombine/memrchr-4.ll
index c383d433a10a..54dde6bb9818 100644
--- a/llvm/test/Transforms/InstCombine/memrchr-4.ll
+++ b/llvm/test/Transforms/InstCombine/memrchr-4.ll
@@ -4,7 +4,7 @@
; Verify that memrchr calls with a string consisting of all the same
; characters are folded and those with mixed strings are not.
-declare i8* @memrchr(i8*, i32, i64)
+declare ptr @memrchr(ptr, i32, i64)
@a11111 = constant [5 x i8] c"\01\01\01\01\01"
@a1110111 = constant [7 x i8] c"\01\01\01\00\01\01\01"
@@ -12,94 +12,88 @@ declare i8* @memrchr(i8*, i32, i64)
; Fold memrchr(a11111, C, 5) to *a11111 == C ? a11111 + 5 - 1 : null.
-define i8* @fold_memrchr_a11111_c_5(i32 %C) {
+define ptr @fold_memrchr_a11111_c_5(i32 %C) {
; CHECK-LABEL: @fold_memrchr_a11111_c_5(
; CHECK-NEXT: [[TMP1:%.*]] = trunc i32 [[C:%.*]] to i8
; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i8 [[TMP1]], 1
-; CHECK-NEXT: [[MEMRCHR_SEL:%.*]] = select i1 [[TMP2]], i8* getelementptr inbounds ([5 x i8], [5 x i8]* @a11111, i64 0, i64 4), i8* null
-; CHECK-NEXT: ret i8* [[MEMRCHR_SEL]]
+; CHECK-NEXT: [[MEMRCHR_SEL:%.*]] = select i1 [[TMP2]], ptr getelementptr inbounds ([5 x i8], ptr @a11111, i64 0, i64 4), ptr null
+; CHECK-NEXT: ret ptr [[MEMRCHR_SEL]]
;
- %ptr = getelementptr [5 x i8], [5 x i8]* @a11111, i64 0, i64 0
- %ret = call i8* @memrchr(i8* %ptr, i32 %C, i64 5)
- ret i8* %ret
+ %ret = call ptr @memrchr(ptr @a11111, i32 %C, i64 5)
+ ret ptr %ret
}
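For reference, the constant-length fold exercised above has a simple C-level reading: in a 5-byte buffer of identical bytes, the last match (if any) is always the final byte. A minimal sketch, with a hypothetical helper name that is not part of the test:
    #include <stddef.h>

    static const unsigned char a11111[5] = {1, 1, 1, 1, 1};

    /* What the folded IR computes for memrchr(a11111, C, 5): if C (as an
       unsigned char) matches the repeated byte, the last occurrence is the
       final element a11111 + 4; otherwise there is no match. */
    static const void *folded_memrchr_a11111_c_5(int C) {
        return ((unsigned char)C == a11111[0]) ? (const void *)(a11111 + 4) : NULL;
    }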
; Fold memrchr(a11111, C, N) to N && *a11111 == C ? a11111 + N - 1 : null,
; on the assumption that N is in bounds.
-define i8* @fold_memrchr_a11111_c_n(i32 %C, i64 %N) {
+define ptr @fold_memrchr_a11111_c_n(i32 %C, i64 %N) {
; CHECK-LABEL: @fold_memrchr_a11111_c_n(
; CHECK-NEXT: [[TMP1:%.*]] = icmp ne i64 [[N:%.*]], 0
; CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[C:%.*]] to i8
; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i8 [[TMP2]], 1
; CHECK-NEXT: [[TMP4:%.*]] = select i1 [[TMP1]], i1 [[TMP3]], i1 false
; CHECK-NEXT: [[TMP5:%.*]] = add i64 [[N]], -1
-; CHECK-NEXT: [[MEMRCHR_PTR_PLUS:%.*]] = getelementptr inbounds [5 x i8], [5 x i8]* @a11111, i64 0, i64 [[TMP5]]
-; CHECK-NEXT: [[MEMRCHR_SEL:%.*]] = select i1 [[TMP4]], i8* [[MEMRCHR_PTR_PLUS]], i8* null
-; CHECK-NEXT: ret i8* [[MEMRCHR_SEL]]
+; CHECK-NEXT: [[MEMRCHR_PTR_PLUS:%.*]] = getelementptr inbounds i8, ptr @a11111, i64 [[TMP5]]
+; CHECK-NEXT: [[MEMRCHR_SEL:%.*]] = select i1 [[TMP4]], ptr [[MEMRCHR_PTR_PLUS]], ptr null
+; CHECK-NEXT: ret ptr [[MEMRCHR_SEL]]
;
- %ptr = getelementptr [5 x i8], [5 x i8]* @a11111, i64 0, i64 0
- %ret = call i8* @memrchr(i8* %ptr, i32 %C, i64 %N)
- ret i8* %ret
+ %ret = call ptr @memrchr(ptr @a11111, i32 %C, i64 %N)
+ ret ptr %ret
}
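The variable-length variant additionally has to guard against N == 0, which is why the folded IR above carries the extra icmp against zero. A hedged C sketch of the same logic, under the test's own in-bounds assumption (helper name made up):
    #include <stddef.h>
    #include <stdint.h>

    static const unsigned char a11111[5] = {1, 1, 1, 1, 1};

    /* memrchr(a11111, C, N), assuming 0 <= N <= 5 as the comment above does:
       a match exists only if N is non-zero and C equals the repeated byte,
       and the last match is then at index N - 1. */
    static const void *folded_memrchr_a11111_c_n(int C, uint64_t N) {
        int match = (N != 0) && ((unsigned char)C == a11111[0]);
        return match ? (const void *)(a11111 + (N - 1)) : NULL;
    }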
; Fold memrchr(a1110111, C, 3) to a1110111[2] == C ? a1110111 + 2 : null.
-define i8* @fold_memrchr_a1110111_c_3(i32 %C) {
+define ptr @fold_memrchr_a1110111_c_3(i32 %C) {
; CHECK-LABEL: @fold_memrchr_a1110111_c_3(
; CHECK-NEXT: [[TMP1:%.*]] = trunc i32 [[C:%.*]] to i8
; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i8 [[TMP1]], 1
-; CHECK-NEXT: [[MEMRCHR_SEL:%.*]] = select i1 [[TMP2]], i8* getelementptr inbounds ([7 x i8], [7 x i8]* @a1110111, i64 0, i64 2), i8* null
-; CHECK-NEXT: ret i8* [[MEMRCHR_SEL]]
+; CHECK-NEXT: [[MEMRCHR_SEL:%.*]] = select i1 [[TMP2]], ptr getelementptr inbounds ([7 x i8], ptr @a1110111, i64 0, i64 2), ptr null
+; CHECK-NEXT: ret ptr [[MEMRCHR_SEL]]
;
- %ptr = getelementptr [7 x i8], [7 x i8]* @a1110111, i64 0, i64 0
- %ret = call i8* @memrchr(i8* %ptr, i32 %C, i64 3)
- ret i8* %ret
+ %ret = call ptr @memrchr(ptr @a1110111, i32 %C, i64 3)
+ ret ptr %ret
}
; Don't fold memrchr(a1110111, C, 4).
-define i8* @call_memrchr_a1110111_c_4(i32 %C) {
+define ptr @call_memrchr_a1110111_c_4(i32 %C) {
; CHECK-LABEL: @call_memrchr_a1110111_c_4(
-; CHECK-NEXT: [[RET:%.*]] = call i8* @memrchr(i8* noundef nonnull dereferenceable(4) getelementptr inbounds ([7 x i8], [7 x i8]* @a1110111, i64 0, i64 0), i32 [[C:%.*]], i64 4)
-; CHECK-NEXT: ret i8* [[RET]]
+; CHECK-NEXT: [[RET:%.*]] = call ptr @memrchr(ptr noundef nonnull dereferenceable(4) @a1110111, i32 [[C:%.*]], i64 4)
+; CHECK-NEXT: ret ptr [[RET]]
;
- %ptr = getelementptr [7 x i8], [7 x i8]* @a1110111, i64 0, i64 0
- %ret = call i8* @memrchr(i8* %ptr, i32 %C, i64 4)
- ret i8* %ret
+ %ret = call ptr @memrchr(ptr @a1110111, i32 %C, i64 4)
+ ret ptr %ret
}
; Don't fold memrchr(a1110111, C, 7).
-define i8* @call_memrchr_a1110111_c_7(i32 %C) {
+define ptr @call_memrchr_a1110111_c_7(i32 %C) {
; CHECK-LABEL: @call_memrchr_a1110111_c_7(
-; CHECK-NEXT: [[RET:%.*]] = call i8* @memrchr(i8* noundef nonnull dereferenceable(7) getelementptr inbounds ([7 x i8], [7 x i8]* @a1110111, i64 0, i64 0), i32 [[C:%.*]], i64 7)
-; CHECK-NEXT: ret i8* [[RET]]
+; CHECK-NEXT: [[RET:%.*]] = call ptr @memrchr(ptr noundef nonnull dereferenceable(7) @a1110111, i32 [[C:%.*]], i64 7)
+; CHECK-NEXT: ret ptr [[RET]]
;
- %ptr = getelementptr [7 x i8], [7 x i8]* @a1110111, i64 0, i64 0
- %ret = call i8* @memrchr(i8* %ptr, i32 %C, i64 7)
- ret i8* %ret
+ %ret = call ptr @memrchr(ptr @a1110111, i32 %C, i64 7)
+ ret ptr %ret
}
; Don't fold memrchr(a1110111, C, N).
-define i8* @call_memrchr_a1110111_c_n(i32 %C, i64 %N) {
+define ptr @call_memrchr_a1110111_c_n(i32 %C, i64 %N) {
; CHECK-LABEL: @call_memrchr_a1110111_c_n(
-; CHECK-NEXT: [[RET:%.*]] = call i8* @memrchr(i8* nonnull getelementptr inbounds ([7 x i8], [7 x i8]* @a1110111, i64 0, i64 0), i32 [[C:%.*]], i64 [[N:%.*]])
-; CHECK-NEXT: ret i8* [[RET]]
+; CHECK-NEXT: [[RET:%.*]] = call ptr @memrchr(ptr nonnull @a1110111, i32 [[C:%.*]], i64 [[N:%.*]])
+; CHECK-NEXT: ret ptr [[RET]]
;
- %ptr = getelementptr [7 x i8], [7 x i8]* @a1110111, i64 0, i64 0
- %ret = call i8* @memrchr(i8* %ptr, i32 %C, i64 %N)
- ret i8* %ret
+ %ret = call ptr @memrchr(ptr @a1110111, i32 %C, i64 %N)
+ ret ptr %ret
}
diff --git a/llvm/test/Transforms/InstCombine/select.ll b/llvm/test/Transforms/InstCombine/select.ll
index 7388f64bf947..2aca9fc52783 100644
--- a/llvm/test/Transforms/InstCombine/select.ll
+++ b/llvm/test/Transforms/InstCombine/select.ll
@@ -338,73 +338,73 @@ define i1 @test14b(i1 %C, i32 %X) {
ret i1 %R
}
-define i32 @test16(i1 %C, i32* %P) {
+define i32 @test16(i1 %C, ptr %P) {
; CHECK-LABEL: @test16(
-; CHECK-NEXT: [[V:%.*]] = load i32, i32* [[P:%.*]], align 4
+; CHECK-NEXT: [[V:%.*]] = load i32, ptr [[P:%.*]], align 4
; CHECK-NEXT: ret i32 [[V]]
;
- %P2 = select i1 %C, i32* %P, i32* null
- %V = load i32, i32* %P2
+ %P2 = select i1 %C, ptr %P, ptr null
+ %V = load i32, ptr %P2
ret i32 %V
}
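The fold in @test16 relies on loads through a null pointer being undefined in the default address space; the follow-on tests check the cases (non-zero address space, null_pointer_is_valid) where that reasoning does not apply. A rough C-level sketch of the positive case, with a made-up function name:
    /* In the default address space, loading through a null pointer is
       undefined, so the selected pointer can be assumed to be the non-null
       operand. */
    static int load_of_select_with_null(int flag, int *p) {
        int *q = flag ? p : (int *)0;
        return *q;   /* effectively reducible to `return *p;` */
    }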
;; It may be legal to load from a null address in a non-zero address space
-define i32 @test16_neg(i1 %C, i32 addrspace(1)* %P) {
+define i32 @test16_neg(i1 %C, ptr addrspace(1) %P) {
; CHECK-LABEL: @test16_neg(
-; CHECK-NEXT: [[P2:%.*]] = select i1 [[C:%.*]], i32 addrspace(1)* [[P:%.*]], i32 addrspace(1)* null
-; CHECK-NEXT: [[V:%.*]] = load i32, i32 addrspace(1)* [[P2]], align 4
+; CHECK-NEXT: [[P2:%.*]] = select i1 [[C:%.*]], ptr addrspace(1) [[P:%.*]], ptr addrspace(1) null
+; CHECK-NEXT: [[V:%.*]] = load i32, ptr addrspace(1) [[P2]], align 4
; CHECK-NEXT: ret i32 [[V]]
;
- %P2 = select i1 %C, i32 addrspace(1)* %P, i32 addrspace(1)* null
- %V = load i32, i32 addrspace(1)* %P2
+ %P2 = select i1 %C, ptr addrspace(1) %P, ptr addrspace(1) null
+ %V = load i32, ptr addrspace(1) %P2
ret i32 %V
}
-define i32 @test16_neg2(i1 %C, i32 addrspace(1)* %P) {
+define i32 @test16_neg2(i1 %C, ptr addrspace(1) %P) {
; CHECK-LABEL: @test16_neg2(
-; CHECK-NEXT: [[P2:%.*]] = select i1 [[C:%.*]], i32 addrspace(1)* null, i32 addrspace(1)* [[P:%.*]]
-; CHECK-NEXT: [[V:%.*]] = load i32, i32 addrspace(1)* [[P2]], align 4
+; CHECK-NEXT: [[P2:%.*]] = select i1 [[C:%.*]], ptr addrspace(1) null, ptr addrspace(1) [[P:%.*]]
+; CHECK-NEXT: [[V:%.*]] = load i32, ptr addrspace(1) [[P2]], align 4
; CHECK-NEXT: ret i32 [[V]]
;
- %P2 = select i1 %C, i32 addrspace(1)* null, i32 addrspace(1)* %P
- %V = load i32, i32 addrspace(1)* %P2
+ %P2 = select i1 %C, ptr addrspace(1) null, ptr addrspace(1) %P
+ %V = load i32, ptr addrspace(1) %P2
ret i32 %V
}
;; It may be legal to load from a null address with null pointer valid attribute.
-define i32 @test16_no_null_opt(i1 %C, i32* %P) #0 {
+define i32 @test16_no_null_opt(i1 %C, ptr %P) #0 {
; CHECK-LABEL: @test16_no_null_opt(
-; CHECK-NEXT: [[P2:%.*]] = select i1 [[C:%.*]], i32* [[P:%.*]], i32* null
-; CHECK-NEXT: [[V:%.*]] = load i32, i32* [[P2]], align 4
+; CHECK-NEXT: [[P2:%.*]] = select i1 [[C:%.*]], ptr [[P:%.*]], ptr null
+; CHECK-NEXT: [[V:%.*]] = load i32, ptr [[P2]], align 4
; CHECK-NEXT: ret i32 [[V]]
;
- %P2 = select i1 %C, i32* %P, i32* null
- %V = load i32, i32* %P2
+ %P2 = select i1 %C, ptr %P, ptr null
+ %V = load i32, ptr %P2
ret i32 %V
}
-define i32 @test16_no_null_opt_2(i1 %C, i32* %P) #0 {
+define i32 @test16_no_null_opt_2(i1 %C, ptr %P) #0 {
; CHECK-LABEL: @test16_no_null_opt_2(
-; CHECK-NEXT: [[P2:%.*]] = select i1 [[C:%.*]], i32* null, i32* [[P:%.*]]
-; CHECK-NEXT: [[V:%.*]] = load i32, i32* [[P2]], align 4
+; CHECK-NEXT: [[P2:%.*]] = select i1 [[C:%.*]], ptr null, ptr [[P:%.*]]
+; CHECK-NEXT: [[V:%.*]] = load i32, ptr [[P2]], align 4
; CHECK-NEXT: ret i32 [[V]]
;
- %P2 = select i1 %C, i32* null, i32* %P
- %V = load i32, i32* %P2
+ %P2 = select i1 %C, ptr null, ptr %P
+ %V = load i32, ptr %P2
ret i32 %V
}
attributes #0 = { null_pointer_is_valid }
-define i1 @test17(i32* %X, i1 %C) {
+define i1 @test17(ptr %X, i1 %C) {
; CHECK-LABEL: @test17(
-; CHECK-NEXT: [[RV1:%.*]] = icmp eq i32* [[X:%.*]], null
+; CHECK-NEXT: [[RV1:%.*]] = icmp eq ptr [[X:%.*]], null
; CHECK-NEXT: [[NOT_C:%.*]] = xor i1 [[C:%.*]], true
; CHECK-NEXT: [[RV:%.*]] = select i1 [[NOT_C]], i1 true, i1 [[RV1]]
; CHECK-NEXT: ret i1 [[RV]]
;
- %R = select i1 %C, i32* %X, i32* null
- %RV = icmp eq i32* %R, null
+ %R = select i1 %C, ptr %X, ptr null
+ %RV = icmp eq ptr %R, null
ret i1 %RV
}
@@ -658,8 +658,8 @@ define i1 @test38(i1 %cond) {
;
%zero = alloca i32
%one = alloca i32
- %ptr = select i1 %cond, i32* %zero, i32* %one
- %isnull = icmp eq i32* %ptr, null
+ %ptr = select i1 %cond, ptr %zero, ptr %one
+ %isnull = icmp eq ptr %ptr, null
ret i1 %isnull
}
@@ -679,8 +679,8 @@ define i1 @test40(i1 %cond) {
%a = alloca i32
%b = alloca i32
%c = alloca i32
- %s = select i1 %cond, i32* %a, i32* %b
- %r = icmp eq i32* %s, %c
+ %s = select i1 %cond, ptr %a, ptr %b
+ %r = icmp eq ptr %s, %c
ret i1 %r
}
@@ -748,34 +748,31 @@ define <vscale x 4 x float> @bitcast_select_bitcast(<vscale x 4 x i1> %icmp, <vs
ret <vscale x 4 x float> %bc2
}
-define void @select_oneuse_bitcast(<vscale x 4 x float> %a, <vscale x 4 x float> %b, <vscale x 4 x i32> %c, <vscale x 4 x i32> %d, <vscale x 4 x i32>* %ptr1) {
+define void @select_oneuse_bitcast(<vscale x 4 x float> %a, <vscale x 4 x float> %b, <vscale x 4 x i32> %c, <vscale x 4 x i32> %d, ptr %ptr1) {
; CHECK-LABEL: @select_oneuse_bitcast(
; CHECK-NEXT: [[CMP:%.*]] = icmp ult <vscale x 4 x i32> [[C:%.*]], [[D:%.*]]
; CHECK-NEXT: [[SEL1_V:%.*]] = select <vscale x 4 x i1> [[CMP]], <vscale x 4 x float> [[A:%.*]], <vscale x 4 x float> [[B:%.*]]
-; CHECK-NEXT: [[TMP1:%.*]] = bitcast <vscale x 4 x i32>* [[PTR1:%.*]] to <vscale x 4 x float>*
-; CHECK-NEXT: store <vscale x 4 x float> [[SEL1_V]], <vscale x 4 x float>* [[TMP1]], align 16
+; CHECK-NEXT: store <vscale x 4 x float> [[SEL1_V]], ptr [[PTR1:%.*]], align 16
; CHECK-NEXT: ret void
;
%cmp = icmp ult <vscale x 4 x i32> %c, %d
%bc1 = bitcast <vscale x 4 x float> %a to <vscale x 4 x i32>
%bc2 = bitcast <vscale x 4 x float> %b to <vscale x 4 x i32>
%sel1 = select <vscale x 4 x i1> %cmp, <vscale x 4 x i32> %bc1, <vscale x 4 x i32> %bc2
- store <vscale x 4 x i32> %sel1, <vscale x 4 x i32>* %ptr1
+ store <vscale x 4 x i32> %sel1, ptr %ptr1
ret void
}
; Allow select promotion even if there are multiple uses of bitcasted ops.
; Hoisting the selects allows later pattern matching to see that these are min/max ops.
-define void @min_max_bitcast(<4 x float> %a, <4 x float> %b, <4 x i32>* %ptr1, <4 x i32>* %ptr2) {
+define void @min_max_bitcast(<4 x float> %a, <4 x float> %b, ptr %ptr1, ptr %ptr2) {
; CHECK-LABEL: @min_max_bitcast(
; CHECK-NEXT: [[CMP:%.*]] = fcmp olt <4 x float> [[A:%.*]], [[B:%.*]]
; CHECK-NEXT: [[SEL1_V:%.*]] = select <4 x i1> [[CMP]], <4 x float> [[A]], <4 x float> [[B]]
; CHECK-NEXT: [[SEL2_V:%.*]] = select <4 x i1> [[CMP]], <4 x float> [[B]], <4 x float> [[A]]
-; CHECK-NEXT: [[TMP1:%.*]] = bitcast <4 x i32>* [[PTR1:%.*]] to <4 x float>*
-; CHECK-NEXT: store <4 x float> [[SEL1_V]], <4 x float>* [[TMP1]], align 16
-; CHECK-NEXT: [[TMP2:%.*]] = bitcast <4 x i32>* [[PTR2:%.*]] to <4 x float>*
-; CHECK-NEXT: store <4 x float> [[SEL2_V]], <4 x float>* [[TMP2]], align 16
+; CHECK-NEXT: store <4 x float> [[SEL1_V]], ptr [[PTR1:%.*]], align 16
+; CHECK-NEXT: store <4 x float> [[SEL2_V]], ptr [[PTR2:%.*]], align 16
; CHECK-NEXT: ret void
;
%cmp = fcmp olt <4 x float> %a, %b
@@ -783,20 +780,18 @@ define void @min_max_bitcast(<4 x float> %a, <4 x float> %b, <4 x i32>* %ptr1, <
%bc2 = bitcast <4 x float> %b to <4 x i32>
%sel1 = select <4 x i1> %cmp, <4 x i32> %bc1, <4 x i32> %bc2
%sel2 = select <4 x i1> %cmp, <4 x i32> %bc2, <4 x i32> %bc1
- store <4 x i32> %sel1, <4 x i32>* %ptr1
- store <4 x i32> %sel2, <4 x i32>* %ptr2
+ store <4 x i32> %sel1, ptr %ptr1
+ store <4 x i32> %sel2, ptr %ptr2
ret void
}
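The motivation in the comment above can be seen in C terms: selecting on the bit patterns hides the comparison's operands, while selecting on the floats first leaves an obvious min pattern for later passes. An illustrative sketch only, with NaN handling ignored and names made up:
    #include <stdint.h>
    #include <string.h>

    /* Select performed on the bitcast integer values: the float min is hidden. */
    static uint32_t select_on_bits(float a, float b) {
        uint32_t ia, ib;
        memcpy(&ia, &a, sizeof ia);
        memcpy(&ib, &b, sizeof ib);
        return (a < b) ? ia : ib;
    }

    /* After hoisting the select above the bitcasts: `(a < b) ? a : b` is the
       classic min pattern that later matching can recognize. */
    static uint32_t select_on_float(float a, float b) {
        float m = (a < b) ? a : b;
        uint32_t im;
        memcpy(&im, &m, sizeof im);
        return im;
    }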
-define void @min_max_bitcast1(<vscale x 4 x float> %a, <vscale x 4 x float> %b, <vscale x 4 x i32>* %ptr1, <vscale x 4 x i32>* %ptr2) {
+define void @min_max_bitcast1(<vscale x 4 x float> %a, <vscale x 4 x float> %b, ptr %ptr1, ptr %ptr2) {
; CHECK-LABEL: @min_max_bitcast1(
; CHECK-NEXT: [[CMP:%.*]] = fcmp olt <vscale x 4 x float> [[A:%.*]], [[B:%.*]]
; CHECK-NEXT: [[SEL1_V:%.*]] = select <vscale x 4 x i1> [[CMP]], <vscale x 4 x float> [[A]], <vscale x 4 x float> [[B]]
; CHECK-NEXT: [[SEL2_V:%.*]] = select <vscale x 4 x i1> [[CMP]], <vscale x 4 x float> [[B]], <vscale x 4 x float> [[A]]
-; CHECK-NEXT: [[TMP1:%.*]] = bitcast <vscale x 4 x i32>* [[PTR1:%.*]] to <vscale x 4 x float>*
-; CHECK-NEXT: store <vscale x 4 x float> [[SEL1_V]], <vscale x 4 x float>* [[TMP1]], align 16
-; CHECK-NEXT: [[TMP2:%.*]] = bitcast <vscale x 4 x i32>* [[PTR2:%.*]] to <vscale x 4 x float>*
-; CHECK-NEXT: store <vscale x 4 x float> [[SEL2_V]], <vscale x 4 x float>* [[TMP2]], align 16
+; CHECK-NEXT: store <vscale x 4 x float> [[SEL1_V]], ptr [[PTR1:%.*]], align 16
+; CHECK-NEXT: store <vscale x 4 x float> [[SEL2_V]], ptr [[PTR2:%.*]], align 16
; CHECK-NEXT: ret void
;
%cmp = fcmp olt <vscale x 4 x float> %a, %b
@@ -804,22 +799,22 @@ define void @min_max_bitcast1(<vscale x 4 x float> %a, <vscale x 4 x float> %b,
%bc2 = bitcast <vscale x 4 x float> %b to <vscale x 4 x i32>
%sel1 = select <vscale x 4 x i1> %cmp, <vscale x 4 x i32> %bc1, <vscale x 4 x i32> %bc2
%sel2 = select <vscale x 4 x i1> %cmp, <vscale x 4 x i32> %bc2, <vscale x 4 x i32> %bc1
- store <vscale x 4 x i32> %sel1, <vscale x 4 x i32>* %ptr1
- store <vscale x 4 x i32> %sel2, <vscale x 4 x i32>* %ptr2
+ store <vscale x 4 x i32> %sel1, ptr %ptr1
+ store <vscale x 4 x i32> %sel2, ptr %ptr2
ret void
}
; To avoid potential backend problems, we don't do the same transform for other casts.
-define void @truncs_before_selects(<4 x float> %f1, <4 x float> %f2, <4 x i64> %a, <4 x i64> %b, <4 x i32>* %ptr1, <4 x i32>* %ptr2) {
+define void @truncs_before_selects(<4 x float> %f1, <4 x float> %f2, <4 x i64> %a, <4 x i64> %b, ptr %ptr1, ptr %ptr2) {
; CHECK-LABEL: @truncs_before_selects(
; CHECK-NEXT: [[CMP:%.*]] = fcmp olt <4 x float> [[F1:%.*]], [[F2:%.*]]
; CHECK-NEXT: [[BC1:%.*]] = trunc <4 x i64> [[A:%.*]] to <4 x i32>
; CHECK-NEXT: [[BC2:%.*]] = trunc <4 x i64> [[B:%.*]] to <4 x i32>
; CHECK-NEXT: [[SEL1:%.*]] = select <4 x i1> [[CMP]], <4 x i32> [[BC1]], <4 x i32> [[BC2]]
; CHECK-NEXT: [[SEL2:%.*]] = select <4 x i1> [[CMP]], <4 x i32> [[BC2]], <4 x i32> [[BC1]]
-; CHECK-NEXT: store <4 x i32> [[SEL1]], <4 x i32>* [[PTR1:%.*]], align 16
-; CHECK-NEXT: store <4 x i32> [[SEL2]], <4 x i32>* [[PTR2:%.*]], align 16
+; CHECK-NEXT: store <4 x i32> [[SEL1]], ptr [[PTR1:%.*]], align 16
+; CHECK-NEXT: store <4 x i32> [[SEL2]], ptr [[PTR2:%.*]], align 16
; CHECK-NEXT: ret void
;
%cmp = fcmp olt <4 x float> %f1, %f2
@@ -827,8 +822,8 @@ define void @truncs_before_selects(<4 x float> %f1, <4 x float> %f2, <4 x i64> %
%bc2 = trunc <4 x i64> %b to <4 x i32>
%sel1 = select <4 x i1> %cmp, <4 x i32> %bc1, <4 x i32> %bc2
%sel2 = select <4 x i1> %cmp, <4 x i32> %bc2, <4 x i32> %bc1
- store <4 x i32> %sel1, <4 x i32>* %ptr1, align 16
- store <4 x i32> %sel2, <4 x i32>* %ptr2, align 16
+ store <4 x i32> %sel1, ptr %ptr1, align 16
+ store <4 x i32> %sel2, ptr %ptr2, align 16
ret void
}
@@ -931,28 +926,28 @@ define i32 @test59(i32 %x, i32 %y) {
ret i32 %.and
}
-define i1 @test60(i32 %x, i1* %y) {
+define i1 @test60(i32 %x, ptr %y) {
; CHECK-LABEL: @test60(
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[X:%.*]], 0
-; CHECK-NEXT: [[LOAD:%.*]] = load i1, i1* [[Y:%.*]], align 1
+; CHECK-NEXT: [[LOAD:%.*]] = load i1, ptr [[Y:%.*]], align 1
; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[X]], 1
; CHECK-NEXT: [[SEL:%.*]] = select i1 [[CMP]], i1 [[LOAD]], i1 [[CMP1]]
; CHECK-NEXT: ret i1 [[SEL]]
;
%cmp = icmp eq i32 %x, 0
- %load = load i1, i1* %y, align 1
+ %load = load i1, ptr %y, align 1
%cmp1 = icmp slt i32 %x, 1
%sel = select i1 %cmp, i1 %load, i1 %cmp1
ret i1 %sel
}
@glbl = constant i32 10
-define i32 @test61(i32* %ptr) {
+define i32 @test61(ptr %ptr) {
; CHECK-LABEL: @test61(
; CHECK-NEXT: ret i32 10
;
- %A = load i32, i32* %ptr
- %B = icmp eq i32* %ptr, @glbl
+ %A = load i32, ptr %ptr
+ %B = icmp eq ptr %ptr, @glbl
%C = select i1 %B, i32 %A, i32 10
ret i32 %C
}
@@ -1008,134 +1003,130 @@ while.body:
; The load here must not be speculated around the select. One side of the
; select is trivially dereferenceable but may have a lower alignment than the
; load does.
-define i32 @test76(i1 %flag, i32* %x) {
+define i32 @test76(i1 %flag, ptr %x) {
; CHECK-LABEL: @test76(
-; CHECK-NEXT: store i32 0, i32* [[X:%.*]], align 4
-; CHECK-NEXT: [[P:%.*]] = select i1 [[FLAG:%.*]], i32* @under_aligned, i32* [[X]]
-; CHECK-NEXT: [[V:%.*]] = load i32, i32* [[P]], align 4
+; CHECK-NEXT: store i32 0, ptr [[X:%.*]], align 4
+; CHECK-NEXT: [[P:%.*]] = select i1 [[FLAG:%.*]], ptr @under_aligned, ptr [[X]]
+; CHECK-NEXT: [[V:%.*]] = load i32, ptr [[P]], align 4
; CHECK-NEXT: ret i32 [[V]]
;
- store i32 0, i32* %x
- %p = select i1 %flag, i32* @under_aligned, i32* %x
- %v = load i32, i32* %p
+ store i32 0, ptr %x
+ %p = select i1 %flag, ptr @under_aligned, ptr %x
+ %v = load i32, ptr %p
ret i32 %v
}
-declare void @scribble_on_i32(i32*)
+declare void @scribble_on_i32(ptr)
; The load here must not be speculated around the select. One side of the
; select is trivially dereferenceable but may have a lower alignment than the
; load does.
-define i32 @test77(i1 %flag, i32* %x) {
+define i32 @test77(i1 %flag, ptr %x) {
; CHECK-LABEL: @test77(
; CHECK-NEXT: [[UNDER_ALIGNED:%.*]] = alloca i32, align 1
-; CHECK-NEXT: call void @scribble_on_i32(i32* nonnull [[UNDER_ALIGNED]])
-; CHECK-NEXT: store i32 0, i32* [[X:%.*]], align 4
-; CHECK-NEXT: [[P:%.*]] = select i1 [[FLAG:%.*]], i32* [[UNDER_ALIGNED]], i32* [[X]]
-; CHECK-NEXT: [[V:%.*]] = load i32, i32* [[P]], align 4
+; CHECK-NEXT: call void @scribble_on_i32(ptr nonnull [[UNDER_ALIGNED]])
+; CHECK-NEXT: store i32 0, ptr [[X:%.*]], align 4
+; CHECK-NEXT: [[P:%.*]] = select i1 [[FLAG:%.*]], ptr [[UNDER_ALIGNED]], ptr [[X]]
+; CHECK-NEXT: [[V:%.*]] = load i32, ptr [[P]], align 4
; CHECK-NEXT: ret i32 [[V]]
;
%under_aligned = alloca i32, align 1
- call void @scribble_on_i32(i32* %under_aligned)
- store i32 0, i32* %x
- %p = select i1 %flag, i32* %under_aligned, i32* %x
- %v = load i32, i32* %p
+ call void @scribble_on_i32(ptr %under_aligned)
+ store i32 0, ptr %x
+ %p = select i1 %flag, ptr %under_aligned, ptr %x
+ %v = load i32, ptr %p
ret i32 %v
}
-define i32 @test78(i1 %flag, i32* %x, i32* %y, i32* %z) {
+define i32 @test78(i1 %flag, ptr %x, ptr %y, ptr %z) {
; Test that we can speculate the loads around the select even when we can't
; fold the load completely away.
; CHECK-LABEL: @test78(
; CHECK-NEXT: entry:
-; CHECK-NEXT: store i32 0, i32* [[X:%.*]], align 4
-; CHECK-NEXT: store i32 0, i32* [[Y:%.*]], align 4
-; CHECK-NEXT: store i32 42, i32* [[Z:%.*]], align 4
-; CHECK-NEXT: [[X_VAL:%.*]] = load i32, i32* [[X]], align 4
-; CHECK-NEXT: [[Y_VAL:%.*]] = load i32, i32* [[Y]], align 4
+; CHECK-NEXT: store i32 0, ptr [[X:%.*]], align 4
+; CHECK-NEXT: store i32 0, ptr [[Y:%.*]], align 4
+; CHECK-NEXT: store i32 42, ptr [[Z:%.*]], align 4
+; CHECK-NEXT: [[X_VAL:%.*]] = load i32, ptr [[X]], align 4
+; CHECK-NEXT: [[Y_VAL:%.*]] = load i32, ptr [[Y]], align 4
; CHECK-NEXT: [[V:%.*]] = select i1 [[FLAG:%.*]], i32 [[X_VAL]], i32 [[Y_VAL]]
; CHECK-NEXT: ret i32 [[V]]
;
entry:
- store i32 0, i32* %x
- store i32 0, i32* %y
+ store i32 0, ptr %x
+ store i32 0, ptr %y
; Block forwarding by storing to %z which could alias either %x or %y.
- store i32 42, i32* %z
- %p = select i1 %flag, i32* %x, i32* %y
- %v = load i32, i32* %p
+ store i32 42, ptr %z
+ %p = select i1 %flag, ptr %x, ptr %y
+ %v = load i32, ptr %p
ret i32 %v
}
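The speculation checked in @test78 has a straightforward source-level analogue: a load through a select of two pointers that are both known dereferenceable can be rewritten as two loads followed by a select of the values. A hedged C sketch (function names made up):
    /* Before: one load through a selected pointer. */
    static int load_of_select(int flag, int *x, int *y, int *z) {
        *x = 0;
        *y = 0;
        *z = 42;               /* blocks forwarding: z may alias x or y */
        int *p = flag ? x : y;
        return *p;
    }

    /* After speculation: both loads execute, then the values are selected.
       Safe here because both x and y are dereferenceable on this path. */
    static int select_of_loads(int flag, int *x, int *y, int *z) {
        *x = 0;
        *y = 0;
        *z = 42;
        int xv = *x, yv = *y;
        return flag ? xv : yv;
    }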
; Test that we can speculate the loads around the select even when we can't
; fold the load completely away.
-define i32 @test78_deref(i1 %flag, i32* dereferenceable(4) align 4 %x, i32* dereferenceable(4) align 4 %y, i32* %z) nofree nosync {
+define i32 @test78_deref(i1 %flag, ptr dereferenceable(4) align 4 %x, ptr dereferenceable(4) align 4 %y, ptr %z) nofree nosync {
; CHECK-LABEL: @test78_deref(
-; CHECK-NEXT: [[X_VAL:%.*]] = load i32, i32* [[X:%.*]], align 4
-; CHECK-NEXT: [[Y_VAL:%.*]] = load i32, i32* [[Y:%.*]], align 4
+; CHECK-NEXT: [[X_VAL:%.*]] = load i32, ptr [[X:%.*]], align 4
+; CHECK-NEXT: [[Y_VAL:%.*]] = load i32, ptr [[Y:%.*]], align 4
; CHECK-NEXT: [[V:%.*]] = select i1 [[FLAG:%.*]], i32 [[X_VAL]], i32 [[Y_VAL]]
; CHECK-NEXT: ret i32 [[V]]
;
- %p = select i1 %flag, i32* %x, i32* %y
- %v = load i32, i32* %p
+ %p = select i1 %flag, ptr %x, ptr %y
+ %v = load i32, ptr %p
ret i32 %v
}
; The same as @test78 but we can't speculate the load because it can trap
; if under-aligned.
-define i32 @test78_neg(i1 %flag, i32* %x, i32* %y, i32* %z) {
+define i32 @test78_neg(i1 %flag, ptr %x, ptr %y, ptr %z) {
; CHECK-LABEL: @test78_neg(
-; CHECK-NEXT: store i32 0, i32* [[X:%.*]], align 4
-; CHECK-NEXT: store i32 0, i32* [[Y:%.*]], align 4
-; CHECK-NEXT: store i32 42, i32* [[Z:%.*]], align 4
-; CHECK-NEXT: [[P:%.*]] = select i1 [[FLAG:%.*]], i32* [[X]], i32* [[Y]]
-; CHECK-NEXT: [[V:%.*]] = load i32, i32* [[P]], align 16
+; CHECK-NEXT: store i32 0, ptr [[X:%.*]], align 4
+; CHECK-NEXT: store i32 0, ptr [[Y:%.*]], align 4
+; CHECK-NEXT: store i32 42, ptr [[Z:%.*]], align 4
+; CHECK-NEXT: [[P:%.*]] = select i1 [[FLAG:%.*]], ptr [[X]], ptr [[Y]]
+; CHECK-NEXT: [[V:%.*]] = load i32, ptr [[P]], align 16
; CHECK-NEXT: ret i32 [[V]]
;
- store i32 0, i32* %x
- store i32 0, i32* %y
+ store i32 0, ptr %x
+ store i32 0, ptr %y
; Block forwarding by storing to %z which could alias either %x or %y.
- store i32 42, i32* %z
- %p = select i1 %flag, i32* %x, i32* %y
- %v = load i32, i32* %p, align 16
+ store i32 42, ptr %z
+ %p = select i1 %flag, ptr %x, ptr %y
+ %v = load i32, ptr %p, align 16
ret i32 %v
}
; The same as @test78_deref but we can't speculate the load because
; one of the arguments is not sufficiently dereferenceable.
-define i32 @test78_deref_neg(i1 %flag, i32* dereferenceable(2) %x, i32* dereferenceable(4) %y, i32* %z) nofree nosync {
+define i32 @test78_deref_neg(i1 %flag, ptr dereferenceable(2) %x, ptr dereferenceable(4) %y, ptr %z) nofree nosync {
; CHECK-LABEL: @test78_deref_neg(
-; CHECK-NEXT: [[P:%.*]] = select i1 [[FLAG:%.*]], i32* [[X:%.*]], i32* [[Y:%.*]]
-; CHECK-NEXT: [[V:%.*]] = load i32, i32* [[P]], align 4
+; CHECK-NEXT: [[P:%.*]] = select i1 [[FLAG:%.*]], ptr [[X:%.*]], ptr [[Y:%.*]]
+; CHECK-NEXT: [[V:%.*]] = load i32, ptr [[P]], align 4
; CHECK-NEXT: ret i32 [[V]]
;
- %p = select i1 %flag, i32* %x, i32* %y
- %v = load i32, i32* %p
+ %p = select i1 %flag, ptr %x, ptr %y
+ %v = load i32, ptr %p
ret i32 %v
}
; Test that we can speculate the loads around the select even when we can't
; fold the load completely away.
-define float @test79(i1 %flag, float* %x, i32* %y, i32* %z) {
+define float @test79(i1 %flag, ptr %x, ptr %y, ptr %z) {
; CHECK-LABEL: @test79(
-; CHECK-NEXT: [[X1:%.*]] = bitcast float* [[X:%.*]] to i32*
-; CHECK-NEXT: [[Y1:%.*]] = bitcast i32* [[Y:%.*]] to float*
-; CHECK-NEXT: store i32 0, i32* [[X1]], align 4
-; CHECK-NEXT: store i32 0, i32* [[Y]], align 4
-; CHECK-NEXT: store i32 42, i32* [[Z:%.*]], align 4
-; CHECK-NEXT: [[X_VAL:%.*]] = load float, float* [[X]], align 4
-; CHECK-NEXT: [[Y1_VAL:%.*]] = load float, float* [[Y1]], align 4
-; CHECK-NEXT: [[V:%.*]] = select i1 [[FLAG:%.*]], float [[X_VAL]], float [[Y1_VAL]]
+; CHECK-NEXT: store i32 0, ptr [[X:%.*]], align 4
+; CHECK-NEXT: store i32 0, ptr [[Y:%.*]], align 4
+; CHECK-NEXT: store i32 42, ptr [[Z:%.*]], align 4
+; CHECK-NEXT: [[X_VAL:%.*]] = load float, ptr [[X]], align 4
+; CHECK-NEXT: [[Y_VAL:%.*]] = load float, ptr [[Y]], align 4
+; CHECK-NEXT: [[V:%.*]] = select i1 [[FLAG:%.*]], float [[X_VAL]], float [[Y_VAL]]
; CHECK-NEXT: ret float [[V]]
;
- %x1 = bitcast float* %x to i32*
- %y1 = bitcast i32* %y to float*
- store i32 0, i32* %x1
- store i32 0, i32* %y
+ store i32 0, ptr %x
+ store i32 0, ptr %y
; Block forwarding by storing to %z which could alias either %x or %y.
- store i32 42, i32* %z
- %p = select i1 %flag, float* %x, float* %y1
- %v = load float, float* %p
+ store i32 42, ptr %z
+ %p = select i1 %flag, ptr %x, ptr %y
+ %v = load float, ptr %p
ret float %v
}
@@ -1145,20 +1136,20 @@ define i32 @test80(i1 %flag) {
; CHECK-LABEL: @test80(
; CHECK-NEXT: [[X:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[Y:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @scribble_on_i32(i32* nonnull [[X]])
-; CHECK-NEXT: call void @scribble_on_i32(i32* nonnull [[Y]])
-; CHECK-NEXT: [[T:%.*]] = load i32, i32* [[X]], align 4
-; CHECK-NEXT: store i32 [[T]], i32* [[Y]], align 4
+; CHECK-NEXT: call void @scribble_on_i32(ptr nonnull [[X]])
+; CHECK-NEXT: call void @scribble_on_i32(ptr nonnull [[Y]])
+; CHECK-NEXT: [[T:%.*]] = load i32, ptr [[X]], align 4
+; CHECK-NEXT: store i32 [[T]], ptr [[Y]], align 4
; CHECK-NEXT: ret i32 [[T]]
;
%x = alloca i32
%y = alloca i32
- call void @scribble_on_i32(i32* %x)
- call void @scribble_on_i32(i32* %y)
- %t = load i32, i32* %x
- store i32 %t, i32* %y
- %p = select i1 %flag, i32* %x, i32* %y
- %v = load i32, i32* %p
+ call void @scribble_on_i32(ptr %x)
+ call void @scribble_on_i32(ptr %y)
+ %t = load i32, ptr %x
+ store i32 %t, ptr %y
+ %p = select i1 %flag, ptr %x, ptr %y
+ %v = load i32, ptr %p
ret i32 %v
}
@@ -1166,25 +1157,23 @@ define i32 @test80(i1 %flag) {
; differently typed pointers.
define float @test81(i1 %flag) {
; CHECK-LABEL: @test81(
-; CHECK-NEXT: [[X:%.*]] = alloca i32, align 4
+; CHECK-NEXT: [[X:%.*]] = alloca float, align 4
; CHECK-NEXT: [[Y:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @scribble_on_i32(i32* nonnull [[X]])
-; CHECK-NEXT: call void @scribble_on_i32(i32* nonnull [[Y]])
-; CHECK-NEXT: [[T:%.*]] = load i32, i32* [[X]], align 4
-; CHECK-NEXT: store i32 [[T]], i32* [[Y]], align 4
+; CHECK-NEXT: call void @scribble_on_i32(ptr nonnull [[X]])
+; CHECK-NEXT: call void @scribble_on_i32(ptr nonnull [[Y]])
+; CHECK-NEXT: [[T:%.*]] = load i32, ptr [[X]], align 4
+; CHECK-NEXT: store i32 [[T]], ptr [[Y]], align 4
; CHECK-NEXT: [[V:%.*]] = bitcast i32 [[T]] to float
; CHECK-NEXT: ret float [[V]]
;
%x = alloca float
%y = alloca i32
- %x1 = bitcast float* %x to i32*
- %y1 = bitcast i32* %y to float*
- call void @scribble_on_i32(i32* %x1)
- call void @scribble_on_i32(i32* %y)
- %t = load i32, i32* %x1
- store i32 %t, i32* %y
- %p = select i1 %flag, float* %x, float* %y1
- %v = load float, float* %p
+ call void @scribble_on_i32(ptr %x)
+ call void @scribble_on_i32(ptr %y)
+ %t = load i32, ptr %x
+ store i32 %t, ptr %y
+ %p = select i1 %flag, ptr %x, ptr %y
+ %v = load float, ptr %p
ret float %v
}
@@ -1194,118 +1183,100 @@ define i32 @test82(i1 %flag) {
; CHECK-LABEL: @test82(
; CHECK-NEXT: [[X:%.*]] = alloca float, align 4
; CHECK-NEXT: [[Y:%.*]] = alloca i32, align 4
-; CHECK-NEXT: [[X1:%.*]] = bitcast float* [[X]] to i32*
-; CHECK-NEXT: [[Y1:%.*]] = bitcast i32* [[Y]] to float*
-; CHECK-NEXT: call void @scribble_on_i32(i32* nonnull [[X1]])
-; CHECK-NEXT: call void @scribble_on_i32(i32* nonnull [[Y]])
-; CHECK-NEXT: [[T:%.*]] = load float, float* [[X]], align 4
-; CHECK-NEXT: store float [[T]], float* [[Y1]], align 4
+; CHECK-NEXT: call void @scribble_on_i32(ptr nonnull [[X]])
+; CHECK-NEXT: call void @scribble_on_i32(ptr nonnull [[Y]])
+; CHECK-NEXT: [[T:%.*]] = load float, ptr [[X]], align 4
+; CHECK-NEXT: store float [[T]], ptr [[Y]], align 4
; CHECK-NEXT: [[V:%.*]] = bitcast float [[T]] to i32
; CHECK-NEXT: ret i32 [[V]]
;
%x = alloca float
%y = alloca i32
- %x1 = bitcast float* %x to i32*
- %y1 = bitcast i32* %y to float*
- call void @scribble_on_i32(i32* %x1)
- call void @scribble_on_i32(i32* %y)
- %t = load float, float* %x
- store float %t, float* %y1
- %p = select i1 %flag, i32* %x1, i32* %y
- %v = load i32, i32* %p
+ call void @scribble_on_i32(ptr %x)
+ call void @scribble_on_i32(ptr %y)
+ %t = load float, ptr %x
+ store float %t, ptr %y
+ %p = select i1 %flag, ptr %x, ptr %y
+ %v = load i32, ptr %p
ret i32 %v
}
-declare void @scribble_on_i64(i64*)
-declare void @scribble_on_i128(i128*)
+declare void @scribble_on_i64(ptr)
+declare void @scribble_on_i128(ptr)
; Test that we can speculate the load around the select even though they use
; differently typed pointers and requires inttoptr casts.
-define i8* @test83(i1 %flag) {
+define ptr @test83(i1 %flag) {
; CHECK-LABEL: @test83(
-; CHECK-NEXT: [[X:%.*]] = alloca i8*, align 8
-; CHECK-NEXT: [[Y:%.*]] = alloca i8*, align 8
-; CHECK-NEXT: [[TMPCAST:%.*]] = bitcast i8** [[Y]] to i64*
-; CHECK-NEXT: [[X1:%.*]] = bitcast i8** [[X]] to i64*
-; CHECK-NEXT: call void @scribble_on_i64(i64* nonnull [[X1]])
-; CHECK-NEXT: call void @scribble_on_i64(i64* nonnull [[TMPCAST]])
-; CHECK-NEXT: [[T:%.*]] = load i64, i64* [[X1]], align 8
-; CHECK-NEXT: store i64 [[T]], i64* [[TMPCAST]], align 8
-; CHECK-NEXT: [[V:%.*]] = inttoptr i64 [[T]] to i8*
-; CHECK-NEXT: ret i8* [[V]]
-;
- %x = alloca i8*
+; CHECK-NEXT: [[X:%.*]] = alloca ptr, align 8
+; CHECK-NEXT: [[Y:%.*]] = alloca i64, align 8
+; CHECK-NEXT: call void @scribble_on_i64(ptr nonnull [[X]])
+; CHECK-NEXT: call void @scribble_on_i64(ptr nonnull [[Y]])
+; CHECK-NEXT: [[T:%.*]] = load i64, ptr [[X]], align 8
+; CHECK-NEXT: store i64 [[T]], ptr [[Y]], align 8
+; CHECK-NEXT: [[V:%.*]] = inttoptr i64 [[T]] to ptr
+; CHECK-NEXT: ret ptr [[V]]
+;
+ %x = alloca ptr
%y = alloca i64
- %x1 = bitcast i8** %x to i64*
- %y1 = bitcast i64* %y to i8**
- call void @scribble_on_i64(i64* %x1)
- call void @scribble_on_i64(i64* %y)
- %t = load i64, i64* %x1
- store i64 %t, i64* %y
- %p = select i1 %flag, i8** %x, i8** %y1
- %v = load i8*, i8** %p
- ret i8* %v
+ call void @scribble_on_i64(ptr %x)
+ call void @scribble_on_i64(ptr %y)
+ %t = load i64, ptr %x
+ store i64 %t, ptr %y
+ %p = select i1 %flag, ptr %x, ptr %y
+ %v = load ptr, ptr %p
+ ret ptr %v
}
; Test that we can speculate the load around the select even though they use
; differently typed pointers and requires a ptrtoint cast.
define i64 @test84(i1 %flag) {
; CHECK-LABEL: @test84(
-; CHECK-NEXT: [[X:%.*]] = alloca i8*, align 8
-; CHECK-NEXT: [[Y:%.*]] = alloca i8*, align 8
-; CHECK-NEXT: [[TMPCAST:%.*]] = bitcast i8** [[Y]] to i64*
-; CHECK-NEXT: [[X1:%.*]] = bitcast i8** [[X]] to i64*
-; CHECK-NEXT: call void @scribble_on_i64(i64* nonnull [[X1]])
-; CHECK-NEXT: call void @scribble_on_i64(i64* nonnull [[TMPCAST]])
-; CHECK-NEXT: [[T:%.*]] = load i8*, i8** [[X]], align 8
-; CHECK-NEXT: store i8* [[T]], i8** [[Y]], align 8
-; CHECK-NEXT: [[V:%.*]] = ptrtoint i8* [[T]] to i64
+; CHECK-NEXT: [[X:%.*]] = alloca ptr, align 8
+; CHECK-NEXT: [[Y:%.*]] = alloca i64, align 8
+; CHECK-NEXT: call void @scribble_on_i64(ptr nonnull [[X]])
+; CHECK-NEXT: call void @scribble_on_i64(ptr nonnull [[Y]])
+; CHECK-NEXT: [[T:%.*]] = load ptr, ptr [[X]], align 8
+; CHECK-NEXT: store ptr [[T]], ptr [[Y]], align 8
+; CHECK-NEXT: [[V:%.*]] = ptrtoint ptr [[T]] to i64
; CHECK-NEXT: ret i64 [[V]]
;
- %x = alloca i8*
+ %x = alloca ptr
%y = alloca i64
- %x1 = bitcast i8** %x to i64*
- %y1 = bitcast i64* %y to i8**
- call void @scribble_on_i64(i64* %x1)
- call void @scribble_on_i64(i64* %y)
- %t = load i8*, i8** %x
- store i8* %t, i8** %y1
- %p = select i1 %flag, i64* %x1, i64* %y
- %v = load i64, i64* %p
+ call void @scribble_on_i64(ptr %x)
+ call void @scribble_on_i64(ptr %y)
+ %t = load ptr, ptr %x
+ store ptr %t, ptr %y
+ %p = select i1 %flag, ptr %x, ptr %y
+ %v = load i64, ptr %p
ret i64 %v
}
; Test that we can't speculate the load around the select. The load of the
; pointer doesn't load all of the stored integer bits. We could fix this, but it
; would require endianness checks and other nastiness.
-define i8* @test85(i1 %flag) {
+define ptr @test85(i1 %flag) {
; CHECK-LABEL: @test85(
-; CHECK-NEXT: [[X1:%.*]] = alloca [2 x i8*], align 8
+; CHECK-NEXT: [[X:%.*]] = alloca [2 x ptr], align 8
; CHECK-NEXT: [[Y:%.*]] = alloca i128, align 8
-; CHECK-NEXT: [[X1_SUB:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[X1]], i64 0, i64 0
-; CHECK-NEXT: [[X2:%.*]] = bitcast [2 x i8*]* [[X1]] to i128*
-; CHECK-NEXT: [[Y1:%.*]] = bitcast i128* [[Y]] to i8**
-; CHECK-NEXT: call void @scribble_on_i128(i128* nonnull [[X2]])
-; CHECK-NEXT: call void @scribble_on_i128(i128* nonnull [[Y]])
-; CHECK-NEXT: [[T:%.*]] = load i128, i128* [[X2]], align 8
-; CHECK-NEXT: store i128 [[T]], i128* [[Y]], align 8
-; CHECK-NEXT: [[X1_SUB_VAL:%.*]] = load i8*, i8** [[X1_SUB]], align 8
-; CHECK-NEXT: [[Y1_VAL:%.*]] = load i8*, i8** [[Y1]], align 8
-; CHECK-NEXT: [[V:%.*]] = select i1 [[FLAG:%.*]], i8* [[X1_SUB_VAL]], i8* [[Y1_VAL]]
-; CHECK-NEXT: ret i8* [[V]]
-;
- %x = alloca [2 x i8*]
+; CHECK-NEXT: call void @scribble_on_i128(ptr nonnull [[X]])
+; CHECK-NEXT: call void @scribble_on_i128(ptr nonnull [[Y]])
+; CHECK-NEXT: [[T:%.*]] = load i128, ptr [[X]], align 8
+; CHECK-NEXT: store i128 [[T]], ptr [[Y]], align 8
+; CHECK-NEXT: [[X_VAL:%.*]] = load ptr, ptr [[X]], align 8
+; CHECK-NEXT: [[Y_VAL:%.*]] = load ptr, ptr [[Y]], align 8
+; CHECK-NEXT: [[V:%.*]] = select i1 [[FLAG:%.*]], ptr [[X_VAL]], ptr [[Y_VAL]]
+; CHECK-NEXT: ret ptr [[V]]
+;
+ %x = alloca [2 x ptr]
%y = alloca i128
- %x1 = bitcast [2 x i8*]* %x to i8**
- %x2 = bitcast i8** %x1 to i128*
- %y1 = bitcast i128* %y to i8**
- call void @scribble_on_i128(i128* %x2)
- call void @scribble_on_i128(i128* %y)
- %t = load i128, i128* %x2
- store i128 %t, i128* %y
- %p = select i1 %flag, i8** %x1, i8** %y1
- %v = load i8*, i8** %p
- ret i8* %v
+ call void @scribble_on_i128(ptr %x)
+ call void @scribble_on_i128(ptr %y)
+ %t = load i128, ptr %x
+ store i128 %t, ptr %y
+ %p = select i1 %flag, ptr %x, ptr %y
+ %v = load ptr, ptr %p
+ ret ptr %v
}
; Test that we can't speculate the load around the select when the integer size
@@ -1313,31 +1284,25 @@ define i8* @test85(i1 %flag) {
; the bits of the integer.
define i128 @test86(i1 %flag) {
; CHECK-LABEL: @test86(
-; CHECK-NEXT: [[X1:%.*]] = alloca [2 x i8*], align 8
+; CHECK-NEXT: [[X:%.*]] = alloca [2 x ptr], align 8
; CHECK-NEXT: [[Y:%.*]] = alloca i128, align 8
-; CHECK-NEXT: [[X1_SUB:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[X1]], i64 0, i64 0
-; CHECK-NEXT: [[X2:%.*]] = bitcast [2 x i8*]* [[X1]] to i128*
-; CHECK-NEXT: [[Y1:%.*]] = bitcast i128* [[Y]] to i8**
-; CHECK-NEXT: call void @scribble_on_i128(i128* nonnull [[X2]])
-; CHECK-NEXT: call void @scribble_on_i128(i128* nonnull [[Y]])
-; CHECK-NEXT: [[T:%.*]] = load i8*, i8** [[X1_SUB]], align 8
-; CHECK-NEXT: store i8* [[T]], i8** [[Y1]], align 8
-; CHECK-NEXT: [[X2_VAL:%.*]] = load i128, i128* [[X2]], align 8
-; CHECK-NEXT: [[Y_VAL:%.*]] = load i128, i128* [[Y]], align 8
-; CHECK-NEXT: [[V:%.*]] = select i1 [[FLAG:%.*]], i128 [[X2_VAL]], i128 [[Y_VAL]]
+; CHECK-NEXT: call void @scribble_on_i128(ptr nonnull [[X]])
+; CHECK-NEXT: call void @scribble_on_i128(ptr nonnull [[Y]])
+; CHECK-NEXT: [[T:%.*]] = load ptr, ptr [[X]], align 8
+; CHECK-NEXT: store ptr [[T]], ptr [[Y]], align 8
+; CHECK-NEXT: [[X_VAL:%.*]] = load i128, ptr [[X]], align 8
+; CHECK-NEXT: [[Y_VAL:%.*]] = load i128, ptr [[Y]], align 8
+; CHECK-NEXT: [[V:%.*]] = select i1 [[FLAG:%.*]], i128 [[X_VAL]], i128 [[Y_VAL]]
; CHECK-NEXT: ret i128 [[V]]
;
- %x = alloca [2 x i8*]
+ %x = alloca [2 x ptr]
%y = alloca i128
- %x1 = bitcast [2 x i8*]* %x to i8**
- %x2 = bitcast i8** %x1 to i128*
- %y1 = bitcast i128* %y to i8**
- call void @scribble_on_i128(i128* %x2)
- call void @scribble_on_i128(i128* %y)
- %t = load i8*, i8** %x1
- store i8* %t, i8** %y1
- %p = select i1 %flag, i128* %x2, i128* %y
- %v = load i128, i128* %p
+ call void @scribble_on_i128(ptr %x)
+ call void @scribble_on_i128(ptr %y)
+ %t = load ptr, ptr %x
+ store ptr %t, ptr %y
+ %p = select i1 %flag, ptr %x, ptr %y
+ %v = load i128, ptr %p
ret i128 %v
}
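The "endianness checks and other nastiness" mentioned above is the usual narrowing problem: which bits of the wide store land in the first pointer-sized bytes depends on the target's byte order, so the narrow load cannot simply be rewritten in terms of the stored wide value. A small stand-alone C illustration of that dependence (a 1-byte reload of a wider store, analogous to the ptr-sized reload of the i128 here):
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void) {
        uint64_t wide = 0x0123456789abcdefULL;
        unsigned char first_byte;
        memcpy(&first_byte, &wide, 1);   /* narrower reload of a wider store */
        /* Prints ef on little-endian targets and 01 on big-endian ones, so
           the value of a partial reload depends on byte order. */
        printf("%02x\n", first_byte);
        return 0;
    }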
@@ -1392,28 +1357,28 @@ define i32 @PR23757_swapped(i32 %x) {
ret i32 %sel
}
-define i32 @PR23757_ne(i32 %x, i1* %p) {
+define i32 @PR23757_ne(i32 %x, ptr %p) {
; CHECK-LABEL: @PR23757_ne(
; CHECK-NEXT: [[CMP:%.*]] = icmp ne i32 [[X:%.*]], 2147483647
-; CHECK-NEXT: store i1 [[CMP]], i1* [[P:%.*]], align 1
+; CHECK-NEXT: store i1 [[CMP]], ptr [[P:%.*]], align 1
; CHECK-NEXT: ret i32 -2147483648
;
%cmp = icmp ne i32 %x, 2147483647
- store i1 %cmp, i1* %p ; thwart predicate canonicalization
+ store i1 %cmp, ptr %p ; thwart predicate canonicalization
%add = add nsw i32 %x, 1
%sel = select i1 %cmp, i32 -2147483648, i32 %add
ret i32 %sel
}
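The constant result in @PR23757_ne follows from a short case split: when %x is not 2147483647 the select returns the literal -2147483648; when %x is 2147483647 the `add nsw` overflows, so that arm may also be treated as the wrapped value -2147483648, and the whole select becomes the constant. A small C check of the wrap arithmetic, using an unsigned detour to keep the sketch free of signed-overflow UB:
    #include <limits.h>
    #include <stdio.h>

    int main(void) {
        /* 2147483647 + 1 wraps to -2147483648 in 32-bit two's complement. */
        unsigned int wrapped = (unsigned int)INT_MAX + 1u;
        printf("%d\n", (int)wrapped);  /* -2147483648 on the usual 32-bit int targets */
        return 0;
    }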
-define i32 @PR23757_ne_swapped(i32 %x, i1* %p) {
+define i32 @PR23757_ne_swapped(i32 %x, ptr %p) {
; CHECK-LABEL: @PR23757_ne_swapped(
; CHECK-NEXT: [[CMP:%.*]] = icmp ne i32 [[X:%.*]], 2147483647
-; CHECK-NEXT: store i1 [[CMP]], i1* [[P:%.*]], align 1
+; CHECK-NEXT: store i1 [[CMP]], ptr [[P:%.*]], align 1
; CHECK-NEXT: [[ADD:%.*]] = add i32 [[X]], 1
; CHECK-NEXT: ret i32 [[ADD]]
;
%cmp = icmp ne i32 %x, 2147483647
- store i1 %cmp, i1* %p ; thwart predicate canonicalization
+ store i1 %cmp, ptr %p ; thwart predicate canonicalization
%add = add nsw i32 %x, 1
%sel = select i1 %cmp, i32 %add, i32 -2147483648
ret i32 %sel
@@ -1519,19 +1484,19 @@ define <4 x i32> @undef_elts_in_condition(<4 x i32> %a, <4 x i32> %b) {
define <4 x i32> @cannot_canonicalize_to_shuffle1(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: @cannot_canonicalize_to_shuffle1(
-; CHECK-NEXT: [[SEL:%.*]] = select <4 x i1> bitcast (i4 ptrtoint (i32* @g to i4) to <4 x i1>), <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]]
+; CHECK-NEXT: [[SEL:%.*]] = select <4 x i1> bitcast (i4 ptrtoint (ptr @g to i4) to <4 x i1>), <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]]
; CHECK-NEXT: ret <4 x i32> [[SEL]]
;
- %sel = select <4 x i1> bitcast (i4 ptrtoint (i32* @g to i4) to <4 x i1>), <4 x i32> %a, <4 x i32> %b
+ %sel = select <4 x i1> bitcast (i4 ptrtoint (ptr @g to i4) to <4 x i1>), <4 x i32> %a, <4 x i32> %b
ret <4 x i32> %sel
}
define <4 x i32> @cannot_canonicalize_to_shuffle2(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: @cannot_canonicalize_to_shuffle2(
-; CHECK-NEXT: [[SEL:%.*]] = select <4 x i1> <i1 true, i1 undef, i1 false, i1 icmp sle (i16 ptrtoint (i32* @g to i16), i16 4)>, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]]
+; CHECK-NEXT: [[SEL:%.*]] = select <4 x i1> <i1 true, i1 undef, i1 false, i1 icmp sle (i16 ptrtoint (ptr @g to i16), i16 4)>, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]]
; CHECK-NEXT: ret <4 x i32> [[SEL]]
;
- %sel = select <4 x i1> <i1 true, i1 undef, i1 false, i1 icmp sle (i16 ptrtoint (i32* @g to i16), i16 4)>, <4 x i32> %a, <4 x i32> %b
+ %sel = select <4 x i1> <i1 true, i1 undef, i1 false, i1 icmp sle (i16 ptrtoint (ptr @g to i16), i16 4)>, <4 x i32> %a, <4 x i32> %b
ret <4 x i32> %sel
}
@@ -2203,7 +2168,7 @@ merge:
declare i32 @__gxx_personality_v0(...)
declare i1 @foo()
-define i32 @test_invoke_neg(i32 %x, i32 %y) nounwind uwtable ssp personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+define i32 @test_invoke_neg(i32 %x, i32 %y) nounwind uwtable ssp personality ptr @__gxx_personality_v0 {
; CHECK-LABEL: @test_invoke_neg(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[COND:%.*]] = invoke i1 @foo()
@@ -2232,7 +2197,7 @@ lpad:
declare i32 @bar()
-define i32 @test_invoke_2_neg(i1 %cond, i32 %x, i32 %y) nounwind uwtable ssp personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+define i32 @test_invoke_2_neg(i1 %cond, i32 %x, i32 %y) nounwind uwtable ssp personality ptr @__gxx_personality_v0 {
; CHECK-LABEL: @test_invoke_2_neg(
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 [[COND:%.*]], label [[IF_TRUE:%.*]], label [[IF_FALSE:%.*]]
@@ -2508,12 +2473,12 @@ exit:
ret i32 %sel
}
-define i32 @test_select_into_phi_not_idom_no_dom_input_1(i1 %cond, i32 %A, i32 %B, i32 *%p) {
+define i32 @test_select_into_phi_not_idom_no_dom_input_1(i1 %cond, i32 %A, i32 %B, ptr %p) {
; CHECK-LABEL: @test_select_into_phi_not_idom_no_dom_input_1(
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 [[COND:%.*]], label [[IF_TRUE:%.*]], label [[IF_FALSE:%.*]]
; CHECK: if.true:
-; CHECK-NEXT: [[C:%.*]] = load i32, i32* [[P:%.*]], align 4
+; CHECK-NEXT: [[C:%.*]] = load i32, ptr [[P:%.*]], align 4
; CHECK-NEXT: br label [[MERGE:%.*]]
; CHECK: if.false:
; CHECK-NEXT: br label [[MERGE]]
@@ -2527,7 +2492,7 @@ entry:
br i1 %cond, label %if.true, label %if.false
if.true:
- %C = load i32, i32* %p
+ %C = load i32, ptr %p
br label %merge
if.false:
@@ -2542,14 +2507,14 @@ exit:
ret i32 %sel
}
-define i32 @test_select_into_phi_not_idom_no_dom_input_2(i1 %cond, i32 %A, i32 %B, i32 *%p) {
+define i32 @test_select_into_phi_not_idom_no_dom_input_2(i1 %cond, i32 %A, i32 %B, ptr %p) {
; CHECK-LABEL: @test_select_into_phi_not_idom_no_dom_input_2(
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 [[COND:%.*]], label [[IF_TRUE:%.*]], label [[IF_FALSE:%.*]]
; CHECK: if.true:
; CHECK-NEXT: br label [[MERGE:%.*]]
; CHECK: if.false:
-; CHECK-NEXT: [[C:%.*]] = load i32, i32* [[P:%.*]], align 4
+; CHECK-NEXT: [[C:%.*]] = load i32, ptr [[P:%.*]], align 4
; CHECK-NEXT: br label [[MERGE]]
; CHECK: merge:
; CHECK-NEXT: [[SEL:%.*]] = phi i32 [ [[C]], [[IF_FALSE]] ], [ [[B:%.*]], [[IF_TRUE]] ]
@@ -2564,7 +2529,7 @@ if.true:
br label %merge
if.false:
- %C = load i32, i32* %p
+ %C = load i32, ptr %p
br label %merge
merge:
@@ -2928,15 +2893,15 @@ define i32 @select_replacement_loop2(i32 %arg, i32 %arg2) {
}
; TODO: Dropping the inbounds flag should not be necessary for this fold.
-define i8* @select_replacement_gep_inbounds(i8* %base, i64 %offset) {
+define ptr @select_replacement_gep_inbounds(ptr %base, i64 %offset) {
; CHECK-LABEL: @select_replacement_gep_inbounds(
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr i8, i8* [[BASE:%.*]], i64 [[OFFSET:%.*]]
-; CHECK-NEXT: ret i8* [[GEP]]
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[OFFSET:%.*]]
+; CHECK-NEXT: ret ptr [[GEP]]
;
%cmp = icmp eq i64 %offset, 0
- %gep = getelementptr inbounds i8, i8* %base, i64 %offset
- %sel = select i1 %cmp, i8* %base, i8* %gep
- ret i8* %sel
+ %gep = getelementptr inbounds i8, ptr %base, i64 %offset
+ %sel = select i1 %cmp, ptr %base, ptr %gep
+ ret ptr %sel
}
define <2 x i1> @partial_true_undef_condval(<2 x i1> %x) {
@@ -3234,22 +3199,22 @@ define <2 x i8> @ne0_is_all_ones_swap_vec_poison(<2 x i8> %x) {
define i64 @udiv_of_select_constexpr(i1 %c, i64 %x) {
; CHECK-LABEL: @udiv_of_select_constexpr(
-; CHECK-NEXT: [[SEL:%.*]] = select i1 [[C:%.*]], i64 [[X:%.*]], i64 ptrtoint (i32* @glbl to i64)
+; CHECK-NEXT: [[SEL:%.*]] = select i1 [[C:%.*]], i64 [[X:%.*]], i64 ptrtoint (ptr @glbl to i64)
; CHECK-NEXT: [[OP:%.*]] = udiv i64 [[SEL]], 3
; CHECK-NEXT: ret i64 [[OP]]
;
- %sel = select i1 %c, i64 %x, i64 ptrtoint (i32* @glbl to i64)
+ %sel = select i1 %c, i64 %x, i64 ptrtoint (ptr @glbl to i64)
%op = udiv i64 %sel, 3
ret i64 %op
}
define i64 @udiv_of_select_constexpr_commuted(i1 %c, i64 %x) {
; CHECK-LABEL: @udiv_of_select_constexpr_commuted(
-; CHECK-NEXT: [[SEL:%.*]] = select i1 [[C:%.*]], i64 ptrtoint (i32* @glbl to i64), i64 [[X:%.*]]
+; CHECK-NEXT: [[SEL:%.*]] = select i1 [[C:%.*]], i64 ptrtoint (ptr @glbl to i64), i64 [[X:%.*]]
; CHECK-NEXT: [[OP:%.*]] = udiv i64 [[SEL]], 3
; CHECK-NEXT: ret i64 [[OP]]
;
- %sel = select i1 %c, i64 ptrtoint (i32* @glbl to i64), i64 %x
+ %sel = select i1 %c, i64 ptrtoint (ptr @glbl to i64), i64 %x
%op = udiv i64 %sel, 3
ret i64 %op
}
diff --git a/llvm/test/Transforms/InstCombine/sink_sideeffecting_instruction.ll b/llvm/test/Transforms/InstCombine/sink_sideeffecting_instruction.ll
index 9a3ee2e72ed6..f6262fd3c353 100644
--- a/llvm/test/Transforms/InstCombine/sink_sideeffecting_instruction.ll
+++ b/llvm/test/Transforms/InstCombine/sink_sideeffecting_instruction.ll
@@ -2,17 +2,17 @@
; RUN: opt -passes=instcombine -S < %s | FileCheck %s
; Function Attrs: noinline uwtable
-define i32 @foo(i32* nocapture writeonly %arg) {
+define i32 @foo(ptr nocapture writeonly %arg) {
; CHECK-LABEL: @foo(
; CHECK-NEXT: bb:
; CHECK-NEXT: [[VAR:%.*]] = call i32 @baz()
-; CHECK-NEXT: store i32 [[VAR]], i32* [[ARG:%.*]], align 4
+; CHECK-NEXT: store i32 [[VAR]], ptr [[ARG:%.*]], align 4
; CHECK-NEXT: [[VAR1:%.*]] = call i32 @baz()
; CHECK-NEXT: ret i32 [[VAR1]]
;
bb:
%var = call i32 @baz()
- store i32 %var, i32* %arg, align 4
+ store i32 %var, ptr %arg, align 4
%var1 = call i32 @baz()
ret i32 %var1
}
@@ -49,17 +49,15 @@ define i32 @test() {
; CHECK-NEXT: bb:
; CHECK-NEXT: [[VAR:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[VAR1:%.*]] = alloca i32, align 4
-; CHECK-NEXT: [[VAR2:%.*]] = bitcast i32* [[VAR]] to i8*
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull [[VAR2]])
-; CHECK-NEXT: [[VAR3:%.*]] = call i32 @foo(i32* nonnull writeonly [[VAR]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[VAR]])
+; CHECK-NEXT: [[VAR3:%.*]] = call i32 @foo(ptr nonnull writeonly [[VAR]])
; CHECK-NEXT: [[VAR4:%.*]] = icmp eq i32 [[VAR3]], 0
; CHECK-NEXT: br i1 [[VAR4]], label [[BB5:%.*]], label [[BB14:%.*]]
; CHECK: bb5:
-; CHECK-NEXT: [[VAR6:%.*]] = bitcast i32* [[VAR1]] to i8*
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull [[VAR6]])
-; CHECK-NEXT: [[VAR8:%.*]] = load i32, i32* [[VAR]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[VAR1]])
+; CHECK-NEXT: [[VAR8:%.*]] = load i32, ptr [[VAR]], align 4
; CHECK-NEXT: [[VAR9:%.*]] = icmp eq i32 [[VAR8]], 0
-; CHECK-NEXT: [[VAR7:%.*]] = call i32 @foo(i32* nonnull writeonly [[VAR1]])
+; CHECK-NEXT: [[VAR7:%.*]] = call i32 @foo(ptr nonnull writeonly [[VAR1]])
; CHECK-NEXT: br i1 [[VAR9]], label [[BB10:%.*]], label [[BB_CRIT_EDGE:%.*]]
; CHECK: bb10:
; CHECK-NEXT: [[VAR11:%.*]] = call i32 @bar()
@@ -68,28 +66,26 @@ define i32 @test() {
; CHECK-NEXT: br label [[BB12]]
; CHECK: bb12:
; CHECK-NEXT: [[VAR13:%.*]] = phi i32 [ [[VAR11]], [[BB10]] ], [ [[VAR7]], [[BB_CRIT_EDGE]] ]
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull [[VAR6]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[VAR1]])
; CHECK-NEXT: br label [[BB14]]
; CHECK: bb14:
; CHECK-NEXT: [[VAR15:%.*]] = phi i32 [ [[VAR13]], [[BB12]] ], [ 0, [[BB:%.*]] ]
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull [[VAR2]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[VAR]])
; CHECK-NEXT: ret i32 [[VAR15]]
;
bb:
%var = alloca i32, align 4
%var1 = alloca i32, align 4
- %var2 = bitcast i32* %var to i8*
- call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %var2) #4
- %var3 = call i32 @foo(i32* nonnull writeonly %var)
+ call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %var) #4
+ %var3 = call i32 @foo(ptr nonnull writeonly %var)
%var4 = icmp eq i32 %var3, 0
br i1 %var4, label %bb5, label %bb14
bb5: ; preds = %bb
- %var6 = bitcast i32* %var1 to i8*
- call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %var6) #4
- %var8 = load i32, i32* %var, align 4
+ call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %var1) #4
+ %var8 = load i32, ptr %var, align 4
%var9 = icmp eq i32 %var8, 0
- %var7 = call i32 @foo(i32* nonnull writeonly %var1)
+ %var7 = call i32 @foo(ptr nonnull writeonly %var1)
br i1 %var9, label %bb10, label %bb_crit_edge
bb10: ; preds = %bb5
@@ -101,17 +97,17 @@ bb_crit_edge:
bb12: ; preds = %bb10, %bb5
%var13 = phi i32 [ %var11, %bb10 ], [ %var7, %bb_crit_edge ]
- call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %var6) #4
+ call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %var1) #4
br label %bb14
bb14: ; preds = %bb12, %bb
%var15 = phi i32 [ %var13, %bb12 ], [ 0, %bb ]
- call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %var2)
+ call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %var)
ret i32 %var15
}
-declare i32 @unknown(i32* %dest)
-declare i32 @unknown.as2(i32 addrspace(2)* %dest)
+declare i32 @unknown(ptr %dest)
+declare i32 @unknown.as2(ptr addrspace(2) %dest)
define i32 @sink_write_to_use(i1 %c) {
; CHECK-LABEL: @sink_write_to_use(
@@ -121,12 +117,12 @@ define i32 @sink_write_to_use(i1 %c) {
; CHECK: early_return:
; CHECK-NEXT: ret i32 0
; CHECK: use_block:
-; CHECK-NEXT: [[VAR3:%.*]] = call i32 @unknown(i32* nonnull writeonly [[VAR]]) #[[ATTR1:[0-9]+]]
+; CHECK-NEXT: [[VAR3:%.*]] = call i32 @unknown(ptr nonnull writeonly [[VAR]]) #[[ATTR1:[0-9]+]]
; CHECK-NEXT: ret i32 [[VAR3]]
;
entry:
%var = alloca i32, align 4
- %var3 = call i32 @unknown(i32* writeonly %var) argmemonly nounwind willreturn
+ %var3 = call i32 @unknown(ptr writeonly %var) argmemonly nounwind willreturn
br i1 %c, label %early_return, label %use_block
early_return:
@@ -144,12 +140,12 @@ define i32 @sink_readwrite_to_use(i1 %c) {
; CHECK: early_return:
; CHECK-NEXT: ret i32 0
; CHECK: use_block:
-; CHECK-NEXT: [[VAR3:%.*]] = call i32 @unknown(i32* nonnull [[VAR]]) #[[ATTR1]]
+; CHECK-NEXT: [[VAR3:%.*]] = call i32 @unknown(ptr nonnull [[VAR]]) #[[ATTR1]]
; CHECK-NEXT: ret i32 [[VAR3]]
;
entry:
%var = alloca i32, align 4
- %var3 = call i32 @unknown(i32* %var) argmemonly nounwind willreturn
+ %var3 = call i32 @unknown(ptr %var) argmemonly nounwind willreturn
br i1 %c, label %early_return, label %use_block
early_return:
@@ -167,14 +163,12 @@ define i32 @sink_bitcast(i1 %c) {
; CHECK: early_return:
; CHECK-NEXT: ret i32 0
; CHECK: use_block:
-; CHECK-NEXT: [[BITCAST:%.*]] = bitcast i8* [[VAR]] to i32*
-; CHECK-NEXT: [[VAR3:%.*]] = call i32 @unknown(i32* nonnull [[BITCAST]]) #[[ATTR1]]
+; CHECK-NEXT: [[VAR3:%.*]] = call i32 @unknown(ptr nonnull [[VAR]]) #[[ATTR1]]
; CHECK-NEXT: ret i32 [[VAR3]]
;
entry:
%var = alloca i8, align 8
- %bitcast = bitcast i8* %var to i32*
- %var3 = call i32 @unknown(i32* %bitcast) argmemonly nounwind willreturn
+ %var3 = call i32 @unknown(ptr %var) argmemonly nounwind willreturn
br i1 %c, label %early_return, label %use_block
early_return:
@@ -188,20 +182,19 @@ use_block:
define i32 @sink_gep1(i1 %c) {
; CHECK-LABEL: @sink_gep1(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[VAR1:%.*]] = alloca [2 x i32], align 8
+; CHECK-NEXT: [[VAR:%.*]] = alloca i64, align 8
; CHECK-NEXT: br i1 [[C:%.*]], label [[EARLY_RETURN:%.*]], label [[USE_BLOCK:%.*]]
; CHECK: early_return:
; CHECK-NEXT: ret i32 0
; CHECK: use_block:
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[VAR1]], i64 0, i64 1
-; CHECK-NEXT: [[VAR3:%.*]] = call i32 @unknown(i32* nonnull [[GEP]]) #[[ATTR1]]
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[VAR]], i64 1
+; CHECK-NEXT: [[VAR3:%.*]] = call i32 @unknown(ptr nonnull [[GEP]]) #[[ATTR1]]
; CHECK-NEXT: ret i32 [[VAR3]]
;
entry:
%var = alloca i64, align 8
- %bitcast = bitcast i64* %var to i32*
- %gep = getelementptr i32, i32* %bitcast, i32 1
- %var3 = call i32 @unknown(i32* %gep) argmemonly nounwind willreturn
+ %gep = getelementptr i32, ptr %var, i32 1
+ %var3 = call i32 @unknown(ptr %gep) argmemonly nounwind willreturn
br i1 %c, label %early_return, label %use_block
early_return:
@@ -214,19 +207,17 @@ use_block:
define i32 @sink_gep2(i1 %c) {
; CHECK-LABEL: @sink_gep2(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[VAR1:%.*]] = alloca [2 x i32], align 8
+; CHECK-NEXT: [[VAR:%.*]] = alloca i64, align 8
; CHECK-NEXT: br i1 [[C:%.*]], label [[EARLY_RETURN:%.*]], label [[USE_BLOCK:%.*]]
; CHECK: early_return:
; CHECK-NEXT: ret i32 0
; CHECK: use_block:
-; CHECK-NEXT: [[VAR1_SUB:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[VAR1]], i64 0, i64 0
-; CHECK-NEXT: [[VAR3:%.*]] = call i32 @unknown(i32* nonnull [[VAR1_SUB]]) #[[ATTR1]]
+; CHECK-NEXT: [[VAR3:%.*]] = call i32 @unknown(ptr nonnull [[VAR]]) #[[ATTR1]]
; CHECK-NEXT: ret i32 [[VAR3]]
;
entry:
%var = alloca i64, align 8
- %bitcast = bitcast i64* %var to i32*
- %var3 = call i32 @unknown(i32* %bitcast) argmemonly nounwind willreturn
+ %var3 = call i32 @unknown(ptr %var) argmemonly nounwind willreturn
br i1 %c, label %early_return, label %use_block
early_return:
@@ -244,14 +235,14 @@ define i32 @sink_addrspacecast(i1 %c) {
; CHECK: early_return:
; CHECK-NEXT: ret i32 0
; CHECK: use_block:
-; CHECK-NEXT: [[CAST:%.*]] = addrspacecast i32* [[VAR]] to i32 addrspace(2)*
-; CHECK-NEXT: [[VAR3:%.*]] = call i32 @unknown.as2(i32 addrspace(2)* [[CAST]]) #[[ATTR1]]
+; CHECK-NEXT: [[CAST:%.*]] = addrspacecast ptr [[VAR]] to ptr addrspace(2)
+; CHECK-NEXT: [[VAR3:%.*]] = call i32 @unknown.as2(ptr addrspace(2) [[CAST]]) #[[ATTR1]]
; CHECK-NEXT: ret i32 [[VAR3]]
;
entry:
%var = alloca i32, align 8
- %cast = addrspacecast i32* %var to i32 addrspace(2)*
- %var3 = call i32 @unknown.as2(i32 addrspace(2)* %cast) argmemonly nounwind willreturn
+ %cast = addrspacecast ptr %var to ptr addrspace(2)
+ %var3 = call i32 @unknown.as2(ptr addrspace(2) %cast) argmemonly nounwind willreturn
br i1 %c, label %early_return, label %use_block
early_return:
@@ -265,7 +256,7 @@ define i32 @neg_infinite_loop(i1 %c) {
; CHECK-LABEL: @neg_infinite_loop(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[VAR:%.*]] = alloca i32, align 4
-; CHECK-NEXT: [[VAR3:%.*]] = call i32 @unknown(i32* nonnull [[VAR]]) #[[ATTR2:[0-9]+]]
+; CHECK-NEXT: [[VAR3:%.*]] = call i32 @unknown(ptr nonnull [[VAR]]) #[[ATTR2:[0-9]+]]
; CHECK-NEXT: br i1 [[C:%.*]], label [[EARLY_RETURN:%.*]], label [[USE_BLOCK:%.*]]
; CHECK: early_return:
; CHECK-NEXT: ret i32 0
@@ -274,7 +265,7 @@ define i32 @neg_infinite_loop(i1 %c) {
;
entry:
%var = alloca i32, align 4
- %var3 = call i32 @unknown(i32* %var) argmemonly nounwind
+ %var3 = call i32 @unknown(ptr %var) argmemonly nounwind
br i1 %c, label %early_return, label %use_block
early_return:
@@ -288,7 +279,7 @@ define i32 @neg_throw(i1 %c) {
; CHECK-LABEL: @neg_throw(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[VAR:%.*]] = alloca i32, align 4
-; CHECK-NEXT: [[VAR3:%.*]] = call i32 @unknown(i32* nonnull [[VAR]]) #[[ATTR3:[0-9]+]]
+; CHECK-NEXT: [[VAR3:%.*]] = call i32 @unknown(ptr nonnull [[VAR]]) #[[ATTR3:[0-9]+]]
; CHECK-NEXT: br i1 [[C:%.*]], label [[EARLY_RETURN:%.*]], label [[USE_BLOCK:%.*]]
; CHECK: early_return:
; CHECK-NEXT: ret i32 0
@@ -297,7 +288,7 @@ define i32 @neg_throw(i1 %c) {
;
entry:
%var = alloca i32, align 4
- %var3 = call i32 @unknown(i32* %var) argmemonly willreturn
+ %var3 = call i32 @unknown(ptr %var) argmemonly willreturn
br i1 %c, label %early_return, label %use_block
early_return:
@@ -311,7 +302,7 @@ define i32 @neg_unknown_write(i1 %c) {
; CHECK-LABEL: @neg_unknown_write(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[VAR:%.*]] = alloca i32, align 4
-; CHECK-NEXT: [[VAR3:%.*]] = call i32 @unknown(i32* nonnull [[VAR]]) #[[ATTR4:[0-9]+]]
+; CHECK-NEXT: [[VAR3:%.*]] = call i32 @unknown(ptr nonnull [[VAR]]) #[[ATTR4:[0-9]+]]
; CHECK-NEXT: br i1 [[C:%.*]], label [[EARLY_RETURN:%.*]], label [[USE_BLOCK:%.*]]
; CHECK: early_return:
; CHECK-NEXT: ret i32 0
@@ -320,7 +311,7 @@ define i32 @neg_unknown_write(i1 %c) {
;
entry:
%var = alloca i32, align 4
- %var3 = call i32 @unknown(i32* %var) nounwind willreturn
+ %var3 = call i32 @unknown(ptr %var) nounwind willreturn
br i1 %c, label %early_return, label %use_block
early_return:
@@ -334,28 +325,26 @@ define i32 @sink_lifetime1(i1 %c) {
; CHECK-LABEL: @sink_lifetime1(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[VAR:%.*]] = alloca i32, align 4
-; CHECK-NEXT: [[BITCAST:%.*]] = bitcast i32* [[VAR]] to i8*
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull [[BITCAST]])
-; CHECK-NEXT: [[VAR3:%.*]] = call i32 @unknown(i32* nonnull [[VAR]]) #[[ATTR1]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[VAR]])
+; CHECK-NEXT: [[VAR3:%.*]] = call i32 @unknown(ptr nonnull [[VAR]]) #[[ATTR1]]
; CHECK-NEXT: br i1 [[C:%.*]], label [[EARLY_RETURN:%.*]], label [[USE_BLOCK:%.*]]
; CHECK: early_return:
; CHECK-NEXT: ret i32 0
; CHECK: use_block:
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull [[BITCAST]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[VAR]])
; CHECK-NEXT: ret i32 [[VAR3]]
;
entry:
%var = alloca i32, align 4
- %bitcast = bitcast i32* %var to i8*
- call void @llvm.lifetime.start.p0i8(i64 4, i8* %bitcast)
- %var3 = call i32 @unknown(i32* %var) argmemonly nounwind willreturn
+ call void @llvm.lifetime.start.p0(i64 4, ptr %var)
+ %var3 = call i32 @unknown(ptr %var) argmemonly nounwind willreturn
br i1 %c, label %early_return, label %use_block
early_return:
ret i32 0
use_block:
- call void @llvm.lifetime.end.p0i8(i64 4, i8* %bitcast)
+ call void @llvm.lifetime.end.p0(i64 4, ptr %var)
ret i32 %var3
}
@@ -363,27 +352,25 @@ define i32 @sink_lifetime2(i1 %c) {
; CHECK-LABEL: @sink_lifetime2(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[VAR:%.*]] = alloca i32, align 4
-; CHECK-NEXT: [[BITCAST:%.*]] = bitcast i32* [[VAR]] to i8*
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull [[BITCAST]])
-; CHECK-NEXT: [[VAR3:%.*]] = call i32 @unknown(i32* nonnull [[VAR]]) #[[ATTR1]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[VAR]])
+; CHECK-NEXT: [[VAR3:%.*]] = call i32 @unknown(ptr nonnull [[VAR]]) #[[ATTR1]]
; CHECK-NEXT: br i1 [[C:%.*]], label [[MERGE:%.*]], label [[USE_BLOCK:%.*]]
; CHECK: merge:
; CHECK-NEXT: [[RET:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[VAR3]], [[USE_BLOCK]] ]
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull [[BITCAST]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[VAR]])
; CHECK-NEXT: ret i32 [[RET]]
; CHECK: use_block:
; CHECK-NEXT: br label [[MERGE]]
;
entry:
%var = alloca i32, align 4
- %bitcast = bitcast i32* %var to i8*
- call void @llvm.lifetime.start.p0i8(i64 4, i8* %bitcast)
- %var3 = call i32 @unknown(i32* %var) argmemonly nounwind willreturn
+ call void @llvm.lifetime.start.p0(i64 4, ptr %var)
+ %var3 = call i32 @unknown(ptr %var) argmemonly nounwind willreturn
br i1 %c, label %merge, label %use_block
merge:
%ret = phi i32 [0, %entry], [%var3, %use_block]
- call void @llvm.lifetime.end.p0i8(i64 4, i8* %bitcast)
+ call void @llvm.lifetime.end.p0(i64 4, ptr %var)
ret i32 %ret
use_block:
@@ -398,16 +385,15 @@ define i32 @sink_lifetime3(i1 %c) {
; CHECK: early_return:
; CHECK-NEXT: ret i32 0
; CHECK: use_block:
-; CHECK-NEXT: [[VAR3:%.*]] = call i32 @unknown(i32* nonnull [[VAR]]) #[[ATTR1]]
+; CHECK-NEXT: [[VAR3:%.*]] = call i32 @unknown(ptr nonnull [[VAR]]) #[[ATTR1]]
; CHECK-NEXT: ret i32 [[VAR3]]
;
entry:
%var = alloca i32, align 4
- %bitcast = bitcast i32* %var to i8*
- call void @llvm.lifetime.start.p0i8(i64 4, i8* %bitcast)
- call void @llvm.lifetime.end.p0i8(i64 4, i8* %bitcast)
+ call void @llvm.lifetime.start.p0(i64 4, ptr %var)
+ call void @llvm.lifetime.end.p0(i64 4, ptr %var)
; If unknown accesses %var, that's UB
- %var3 = call i32 @unknown(i32* %var) argmemonly nounwind willreturn
+ %var3 = call i32 @unknown(ptr %var) argmemonly nounwind willreturn
br i1 %c, label %early_return, label %use_block
early_return:
@@ -421,10 +407,9 @@ define i32 @sink_lifetime4a(i1 %c) {
; CHECK-LABEL: @sink_lifetime4a(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[VAR:%.*]] = alloca i32, align 4
-; CHECK-NEXT: [[BITCAST:%.*]] = bitcast i32* [[VAR]] to i8*
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull [[BITCAST]])
-; CHECK-NEXT: [[VAR3:%.*]] = call i32 @unknown(i32* nonnull [[VAR]]) #[[ATTR1]]
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull [[BITCAST]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[VAR]])
+; CHECK-NEXT: [[VAR3:%.*]] = call i32 @unknown(ptr nonnull [[VAR]]) #[[ATTR1]]
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[VAR]])
; CHECK-NEXT: br i1 [[C:%.*]], label [[EARLY_RETURN:%.*]], label [[USE_BLOCK:%.*]]
; CHECK: early_return:
; CHECK-NEXT: ret i32 0
@@ -433,10 +418,9 @@ define i32 @sink_lifetime4a(i1 %c) {
;
entry:
%var = alloca i32, align 4
- %bitcast = bitcast i32* %var to i8*
- call void @llvm.lifetime.start.p0i8(i64 4, i8* %bitcast)
- %var3 = call i32 @unknown(i32* %var) argmemonly nounwind willreturn
- call void @llvm.lifetime.end.p0i8(i64 4, i8* %bitcast)
+ call void @llvm.lifetime.start.p0(i64 4, ptr %var)
+ %var3 = call i32 @unknown(ptr %var) argmemonly nounwind willreturn
+ call void @llvm.lifetime.end.p0(i64 4, ptr %var)
br i1 %c, label %early_return, label %use_block
early_return:
@@ -452,10 +436,9 @@ define i32 @sink_lifetime4b(i1 %c) {
; CHECK-LABEL: @sink_lifetime4b(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[VAR:%.*]] = alloca i32, align 4
-; CHECK-NEXT: [[BITCAST:%.*]] = bitcast i32* [[VAR]] to i8*
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull [[BITCAST]])
-; CHECK-NEXT: [[VAR3:%.*]] = call i32 @unknown(i32* nonnull writeonly [[VAR]]) #[[ATTR1]]
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull [[BITCAST]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[VAR]])
+; CHECK-NEXT: [[VAR3:%.*]] = call i32 @unknown(ptr nonnull writeonly [[VAR]]) #[[ATTR1]]
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[VAR]])
; CHECK-NEXT: br i1 [[C:%.*]], label [[EARLY_RETURN:%.*]], label [[USE_BLOCK:%.*]]
; CHECK: early_return:
; CHECK-NEXT: ret i32 0
@@ -464,10 +447,9 @@ define i32 @sink_lifetime4b(i1 %c) {
;
entry:
%var = alloca i32, align 4
- %bitcast = bitcast i32* %var to i8*
- call void @llvm.lifetime.start.p0i8(i64 4, i8* %bitcast)
- %var3 = call i32 @unknown(i32* writeonly %var) argmemonly nounwind willreturn
- call void @llvm.lifetime.end.p0i8(i64 4, i8* %bitcast)
+ call void @llvm.lifetime.start.p0(i64 4, ptr %var)
+ %var3 = call i32 @unknown(ptr writeonly %var) argmemonly nounwind willreturn
+ call void @llvm.lifetime.end.p0(i64 4, ptr %var)
br i1 %c, label %early_return, label %use_block
early_return:
@@ -481,8 +463,8 @@ define i32 @sink_atomicrmw_to_use(i1 %c) {
; CHECK-LABEL: @sink_atomicrmw_to_use(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[VAR:%.*]] = alloca i32, align 4
-; CHECK-NEXT: store i32 0, i32* [[VAR]], align 4
-; CHECK-NEXT: [[VAR3:%.*]] = atomicrmw add i32* [[VAR]], i32 1 seq_cst, align 4
+; CHECK-NEXT: store i32 0, ptr [[VAR]], align 4
+; CHECK-NEXT: [[VAR3:%.*]] = atomicrmw add ptr [[VAR]], i32 1 seq_cst, align 4
; CHECK-NEXT: br i1 [[C:%.*]], label [[EARLY_RETURN:%.*]], label [[USE_BLOCK:%.*]]
; CHECK: early_return:
; CHECK-NEXT: ret i32 0
@@ -491,8 +473,8 @@ define i32 @sink_atomicrmw_to_use(i1 %c) {
;
entry:
%var = alloca i32, align 4
- store i32 0, i32* %var
- %var3 = atomicrmw add i32* %var, i32 1 seq_cst, align 4
+ store i32 0, ptr %var
+ %var3 = atomicrmw add ptr %var, i32 1 seq_cst, align 4
br i1 %c, label %early_return, label %use_block
early_return:
@@ -504,6 +486,6 @@ use_block:
declare i32 @bar()
-declare void @llvm.lifetime.start.p0i8(i64 immarg, i8* nocapture)
-declare void @llvm.lifetime.end.p0i8(i64 immarg, i8* nocapture)
+declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
diff --git a/llvm/test/Transforms/InstCombine/stpcpy-1.ll b/llvm/test/Transforms/InstCombine/stpcpy-1.ll
index cb178f3fd072..86691a08a798 100644
--- a/llvm/test/Transforms/InstCombine/stpcpy-1.ll
+++ b/llvm/test/Transforms/InstCombine/stpcpy-1.ll
@@ -11,67 +11,59 @@ target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f3
@b = common global [32 x i8] zeroinitializer, align 1
@percent_s = constant [3 x i8] c"%s\00"
-declare i8* @stpcpy(i8*, i8*)
+declare ptr @stpcpy(ptr, ptr)
-define i8* @test_simplify1() {
+define ptr @test_simplify1() {
; CHECK-LABEL: @test_simplify1(
-; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* noundef nonnull align 1 dereferenceable(6) getelementptr inbounds ([32 x i8], [32 x i8]* @a, i32 0, i32 0), i8* noundef nonnull align 1 dereferenceable(6) getelementptr inbounds ([6 x i8], [6 x i8]* @hello, i32 0, i32 0), i32 6, i1 false)
-; CHECK-NEXT: ret i8* getelementptr inbounds ([32 x i8], [32 x i8]* @a, i32 0, i32 5)
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr noundef nonnull align 1 dereferenceable(6) @a, ptr noundef nonnull align 1 dereferenceable(6) @hello, i32 6, i1 false)
+; CHECK-NEXT: ret ptr getelementptr inbounds ([32 x i8], ptr @a, i32 0, i32 5)
;
- %dst = getelementptr [32 x i8], [32 x i8]* @a, i32 0, i32 0
- %src = getelementptr [6 x i8], [6 x i8]* @hello, i32 0, i32 0
- %ret = call i8* @stpcpy(i8* %dst, i8* %src)
- ret i8* %ret
+ %ret = call ptr @stpcpy(ptr @a, ptr @hello)
+ ret ptr %ret
}
-define i8* @test_simplify2() {
+define ptr @test_simplify2() {
; CHECK-LABEL: @test_simplify2(
-; CHECK-NEXT: [[STRLEN:%.*]] = call i32 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([32 x i8], [32 x i8]* @a, i32 0, i32 0))
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [32 x i8], [32 x i8]* @a, i32 0, i32 [[STRLEN]]
-; CHECK-NEXT: ret i8* [[TMP1]]
+; CHECK-NEXT: [[STRLEN:%.*]] = call i32 @strlen(ptr noundef nonnull dereferenceable(1) @a)
+; CHECK-NEXT: [[RET:%.*]] = getelementptr inbounds i8, ptr @a, i32 [[STRLEN]]
+; CHECK-NEXT: ret ptr [[RET]]
;
- %dst = getelementptr [32 x i8], [32 x i8]* @a, i32 0, i32 0
- %ret = call i8* @stpcpy(i8* %dst, i8* %dst)
- ret i8* %ret
+ %ret = call ptr @stpcpy(ptr @a, ptr @a)
+ ret ptr %ret
}
-define void @test_simplify3(i8* %dst) {
+define void @test_simplify3(ptr %dst) {
; CHECK-LABEL: @test_simplify3(
-; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* noundef nonnull align 1 dereferenceable(6) [[DST:%.*]], i8* noundef nonnull align 1 dereferenceable(6) getelementptr inbounds ([6 x i8], [6 x i8]* @hello, i32 0, i32 0), i32 6, i1 false)
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr noundef nonnull align 1 dereferenceable(6) [[DST:%.*]], ptr noundef nonnull align 1 dereferenceable(6) @hello, i32 6, i1 false)
; CHECK-NEXT: ret void
;
- %src = getelementptr [6 x i8], [6 x i8]* @hello, i32 0, i32 0
- call i8* @stpcpy(i8* dereferenceable(80) %dst, i8* %src)
+ call ptr @stpcpy(ptr dereferenceable(80) %dst, ptr @hello)
ret void
}
-define i8* @test_no_simplify1() {
+define ptr @test_no_simplify1() {
; CHECK-LABEL: @test_no_simplify1(
-; CHECK-NEXT: [[RET:%.*]] = call i8* @stpcpy(i8* nonnull getelementptr inbounds ([32 x i8], [32 x i8]* @a, i32 0, i32 0), i8* nonnull getelementptr inbounds ([32 x i8], [32 x i8]* @b, i32 0, i32 0))
-; CHECK-NEXT: ret i8* [[RET]]
+; CHECK-NEXT: [[RET:%.*]] = call ptr @stpcpy(ptr nonnull @a, ptr nonnull @b)
+; CHECK-NEXT: ret ptr [[RET]]
;
- %dst = getelementptr [32 x i8], [32 x i8]* @a, i32 0, i32 0
- %src = getelementptr [32 x i8], [32 x i8]* @b, i32 0, i32 0
- %ret = call i8* @stpcpy(i8* %dst, i8* %src)
- ret i8* %ret
+ %ret = call ptr @stpcpy(ptr @a, ptr @b)
+ ret ptr %ret
}
-define i8* @test_no_simplify2(i8* %dst, i8* %src) {
+define ptr @test_no_simplify2(ptr %dst, ptr %src) {
; CHECK-LABEL: @test_no_simplify2(
-; CHECK-NEXT: [[RET:%.*]] = musttail call i8* @stpcpy(i8* [[DST:%.*]], i8* [[SRC:%.*]])
-; CHECK-NEXT: ret i8* [[RET]]
+; CHECK-NEXT: [[RET:%.*]] = musttail call ptr @stpcpy(ptr [[DST:%.*]], ptr [[SRC:%.*]])
+; CHECK-NEXT: ret ptr [[RET]]
;
- %ret = musttail call i8* @stpcpy(i8* %dst, i8* %src)
- ret i8* %ret
+ %ret = musttail call ptr @stpcpy(ptr %dst, ptr %src)
+ ret ptr %ret
}
-define i8* @test_no_incompatible_attr() {
+define ptr @test_no_incompatible_attr() {
; CHECK-LABEL: @test_no_incompatible_attr(
-; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* noundef nonnull align 1 dereferenceable(6) getelementptr inbounds ([32 x i8], [32 x i8]* @a, i32 0, i32 0), i8* noundef nonnull align 1 dereferenceable(6) getelementptr inbounds ([6 x i8], [6 x i8]* @hello, i32 0, i32 0), i32 6, i1 false)
-; CHECK-NEXT: ret i8* getelementptr inbounds ([32 x i8], [32 x i8]* @a, i32 0, i32 5)
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr noundef nonnull align 1 dereferenceable(6) @a, ptr noundef nonnull align 1 dereferenceable(6) @hello, i32 6, i1 false)
+; CHECK-NEXT: ret ptr getelementptr inbounds ([32 x i8], ptr @a, i32 0, i32 5)
;
- %dst = getelementptr [32 x i8], [32 x i8]* @a, i32 0, i32 0
- %src = getelementptr [6 x i8], [6 x i8]* @hello, i32 0, i32 0
- %ret = call dereferenceable(1) i8* @stpcpy(i8* %dst, i8* %src)
- ret i8* %ret
+ %ret = call dereferenceable(1) ptr @stpcpy(ptr @a, ptr @hello)
+ ret ptr %ret
}
diff --git a/llvm/test/Transforms/InstCombine/stpcpy_chk-1.ll b/llvm/test/Transforms/InstCombine/stpcpy_chk-1.ll
index 73a354ddcc03..5ebd9fae7620 100644
--- a/llvm/test/Transforms/InstCombine/stpcpy_chk-1.ll
+++ b/llvm/test/Transforms/InstCombine/stpcpy_chk-1.ll
@@ -12,138 +12,119 @@ target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f3
; Check cases where slen >= strlen (src).
-define i8* @test_simplify1() {
+define ptr @test_simplify1() {
; CHECK-LABEL: @test_simplify1(
-; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* noundef nonnull align 1 dereferenceable(12) getelementptr inbounds ([60 x i8], [60 x i8]* @a, i32 0, i32 0), i8* noundef nonnull align 1 dereferenceable(12) getelementptr inbounds ([12 x i8], [12 x i8]* @.str, i32 0, i32 0), i32 12, i1 false)
-; CHECK-NEXT: ret i8* getelementptr inbounds ([60 x i8], [60 x i8]* @a, i32 0, i32 11)
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr noundef nonnull align 1 dereferenceable(12) @a, ptr noundef nonnull align 1 dereferenceable(12) @.str, i32 12, i1 false)
+; CHECK-NEXT: ret ptr getelementptr inbounds ([60 x i8], ptr @a, i32 0, i32 11)
;
- %dst = getelementptr inbounds [60 x i8], [60 x i8]* @a, i32 0, i32 0
- %src = getelementptr inbounds [12 x i8], [12 x i8]* @.str, i32 0, i32 0
- %ret = call i8* @__stpcpy_chk(i8* %dst, i8* %src, i32 60)
- ret i8* %ret
+ %ret = call ptr @__stpcpy_chk(ptr @a, ptr @.str, i32 60)
+ ret ptr %ret
}
-define i8* @test_simplify2() {
+define ptr @test_simplify2() {
; CHECK-LABEL: @test_simplify2(
-; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* noundef nonnull align 1 dereferenceable(12) getelementptr inbounds ([60 x i8], [60 x i8]* @a, i32 0, i32 0), i8* noundef nonnull align 1 dereferenceable(12) getelementptr inbounds ([12 x i8], [12 x i8]* @.str, i32 0, i32 0), i32 12, i1 false)
-; CHECK-NEXT: ret i8* getelementptr inbounds ([60 x i8], [60 x i8]* @a, i32 0, i32 11)
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr noundef nonnull align 1 dereferenceable(12) @a, ptr noundef nonnull align 1 dereferenceable(12) @.str, i32 12, i1 false)
+; CHECK-NEXT: ret ptr getelementptr inbounds ([60 x i8], ptr @a, i32 0, i32 11)
;
- %dst = getelementptr inbounds [60 x i8], [60 x i8]* @a, i32 0, i32 0
- %src = getelementptr inbounds [12 x i8], [12 x i8]* @.str, i32 0, i32 0
- %ret = call i8* @__stpcpy_chk(i8* %dst, i8* %src, i32 12)
- ret i8* %ret
+ %ret = call ptr @__stpcpy_chk(ptr @a, ptr @.str, i32 12)
+ ret ptr %ret
}
-define i8* @test_simplify3() {
+define ptr @test_simplify3() {
; CHECK-LABEL: @test_simplify3(
-; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* noundef nonnull align 1 dereferenceable(12) getelementptr inbounds ([60 x i8], [60 x i8]* @a, i32 0, i32 0), i8* noundef nonnull align 1 dereferenceable(12) getelementptr inbounds ([12 x i8], [12 x i8]* @.str, i32 0, i32 0), i32 12, i1 false)
-; CHECK-NEXT: ret i8* getelementptr inbounds ([60 x i8], [60 x i8]* @a, i32 0, i32 11)
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr noundef nonnull align 1 dereferenceable(12) @a, ptr noundef nonnull align 1 dereferenceable(12) @.str, i32 12, i1 false)
+; CHECK-NEXT: ret ptr getelementptr inbounds ([60 x i8], ptr @a, i32 0, i32 11)
;
- %dst = getelementptr inbounds [60 x i8], [60 x i8]* @a, i32 0, i32 0
- %src = getelementptr inbounds [12 x i8], [12 x i8]* @.str, i32 0, i32 0
- %ret = call i8* @__stpcpy_chk(i8* %dst, i8* %src, i32 -1)
- ret i8* %ret
+ %ret = call ptr @__stpcpy_chk(ptr @a, ptr @.str, i32 -1)
+ ret ptr %ret
}
-define i8* @test_simplify1_tail() {
+define ptr @test_simplify1_tail() {
; CHECK-LABEL: @test_simplify1_tail(
-; CHECK-NEXT: tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* noundef nonnull align 1 dereferenceable(12) getelementptr inbounds ([60 x i8], [60 x i8]* @a, i32 0, i32 0), i8* noundef nonnull align 1 dereferenceable(12) getelementptr inbounds ([12 x i8], [12 x i8]* @.str, i32 0, i32 0), i32 12, i1 false)
-; CHECK-NEXT: ret i8* getelementptr inbounds ([60 x i8], [60 x i8]* @a, i32 0, i32 11)
+; CHECK-NEXT: tail call void @llvm.memcpy.p0.p0.i32(ptr noundef nonnull align 1 dereferenceable(12) @a, ptr noundef nonnull align 1 dereferenceable(12) @.str, i32 12, i1 false)
+; CHECK-NEXT: ret ptr getelementptr inbounds ([60 x i8], ptr @a, i32 0, i32 11)
;
- %dst = getelementptr inbounds [60 x i8], [60 x i8]* @a, i32 0, i32 0
- %src = getelementptr inbounds [12 x i8], [12 x i8]* @.str, i32 0, i32 0
- %ret = tail call i8* @__stpcpy_chk(i8* %dst, i8* %src, i32 60)
- ret i8* %ret
+ %ret = tail call ptr @__stpcpy_chk(ptr @a, ptr @.str, i32 60)
+ ret ptr %ret
}
; Check cases where there are no string constants.
-define i8* @test_simplify4() {
+define ptr @test_simplify4() {
; CHECK-LABEL: @test_simplify4(
-; CHECK-NEXT: [[STPCPY:%.*]] = call i8* @stpcpy(i8* nonnull getelementptr inbounds ([60 x i8], [60 x i8]* @a, i32 0, i32 0), i8* nonnull getelementptr inbounds ([60 x i8], [60 x i8]* @b, i32 0, i32 0))
-; CHECK-NEXT: ret i8* [[STPCPY]]
+; CHECK-NEXT: [[STPCPY:%.*]] = call ptr @stpcpy(ptr nonnull @a, ptr nonnull @b)
+; CHECK-NEXT: ret ptr [[STPCPY]]
;
- %dst = getelementptr inbounds [60 x i8], [60 x i8]* @a, i32 0, i32 0
- %src = getelementptr inbounds [60 x i8], [60 x i8]* @b, i32 0, i32 0
- %ret = call i8* @__stpcpy_chk(i8* %dst, i8* %src, i32 -1)
- ret i8* %ret
+ %ret = call ptr @__stpcpy_chk(ptr @a, ptr @b, i32 -1)
+ ret ptr %ret
}
-define i8* @test_simplify4_tail() {
+define ptr @test_simplify4_tail() {
; CHECK-LABEL: @test_simplify4_tail(
-; CHECK-NEXT: [[STPCPY:%.*]] = tail call i8* @stpcpy(i8* nonnull getelementptr inbounds ([60 x i8], [60 x i8]* @a, i32 0, i32 0), i8* nonnull getelementptr inbounds ([60 x i8], [60 x i8]* @b, i32 0, i32 0))
-; CHECK-NEXT: ret i8* [[STPCPY]]
+; CHECK-NEXT: [[STPCPY:%.*]] = tail call ptr @stpcpy(ptr nonnull @a, ptr nonnull @b)
+; CHECK-NEXT: ret ptr [[STPCPY]]
;
- %dst = getelementptr inbounds [60 x i8], [60 x i8]* @a, i32 0, i32 0
- %src = getelementptr inbounds [60 x i8], [60 x i8]* @b, i32 0, i32 0
- %ret = tail call i8* @__stpcpy_chk(i8* %dst, i8* %src, i32 -1)
- ret i8* %ret
+ %ret = tail call ptr @__stpcpy_chk(ptr @a, ptr @b, i32 -1)
+ ret ptr %ret
}
; Check case where the string length is not constant.
-define i8* @test_simplify5() {
+define ptr @test_simplify5() {
; CHECK-LABEL: @test_simplify5(
-; CHECK-NEXT: [[LEN:%.*]] = call i32 @llvm.objectsize.i32.p0i8(i8* getelementptr inbounds ([60 x i8], [60 x i8]* @a, i32 0, i32 0), i1 false, i1 false, i1 false)
-; CHECK-NEXT: [[TMP1:%.*]] = call i8* @__memcpy_chk(i8* nonnull getelementptr inbounds ([60 x i8], [60 x i8]* @a, i32 0, i32 0), i8* nonnull getelementptr inbounds ([12 x i8], [12 x i8]* @.str, i32 0, i32 0), i32 12, i32 [[LEN]])
-; CHECK-NEXT: ret i8* getelementptr inbounds ([60 x i8], [60 x i8]* @a, i32 0, i32 11)
+; CHECK-NEXT: [[LEN:%.*]] = call i32 @llvm.objectsize.i32.p0(ptr @a, i1 false, i1 false, i1 false)
+; CHECK-NEXT: [[TMP1:%.*]] = call ptr @__memcpy_chk(ptr nonnull @a, ptr nonnull @.str, i32 12, i32 [[LEN]])
+; CHECK-NEXT: ret ptr getelementptr inbounds ([60 x i8], ptr @a, i32 0, i32 11)
;
- %dst = getelementptr inbounds [60 x i8], [60 x i8]* @a, i32 0, i32 0
- %src = getelementptr inbounds [12 x i8], [12 x i8]* @.str, i32 0, i32 0
- %len = call i32 @llvm.objectsize.i32.p0i8(i8* %dst, i1 false, i1 false, i1 false)
- %ret = call i8* @__stpcpy_chk(i8* %dst, i8* %src, i32 %len)
- ret i8* %ret
+ %len = call i32 @llvm.objectsize.i32.p0(ptr @a, i1 false, i1 false, i1 false)
+ %ret = call ptr @__stpcpy_chk(ptr @a, ptr @.str, i32 %len)
+ ret ptr %ret
}
; Check case where the source and destination are the same.
-define i8* @test_simplify6() {
+define ptr @test_simplify6() {
; CHECK-LABEL: @test_simplify6(
-; CHECK-NEXT: [[STRLEN:%.*]] = call i32 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([60 x i8], [60 x i8]* @a, i32 0, i32 0))
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [60 x i8], [60 x i8]* @a, i32 0, i32 [[STRLEN]]
-; CHECK-NEXT: ret i8* [[TMP1]]
+; CHECK-NEXT: [[STRLEN:%.*]] = call i32 @strlen(ptr noundef nonnull dereferenceable(1) @a)
+; CHECK-NEXT: [[RET:%.*]] = getelementptr inbounds i8, ptr @a, i32 [[STRLEN]]
+; CHECK-NEXT: ret ptr [[RET]]
;
- %dst = getelementptr inbounds [60 x i8], [60 x i8]* @a, i32 0, i32 0
- %len = call i32 @llvm.objectsize.i32.p0i8(i8* %dst, i1 false, i1 false, i1 false)
- %ret = call i8* @__stpcpy_chk(i8* %dst, i8* %dst, i32 %len)
- ret i8* %ret
+ %len = call i32 @llvm.objectsize.i32.p0(ptr @a, i1 false, i1 false, i1 false)
+ %ret = call ptr @__stpcpy_chk(ptr @a, ptr @a, i32 %len)
+ ret ptr %ret
}
; Check cases where there are no string constants, and is a tail call.
-define i8* @test_simplify7() {
+define ptr @test_simplify7() {
; CHECK-LABEL: @test_simplify7(
-; CHECK-NEXT: [[STPCPY:%.*]] = tail call i8* @stpcpy(i8* nonnull getelementptr inbounds ([60 x i8], [60 x i8]* @a, i32 0, i32 0), i8* nonnull getelementptr inbounds ([60 x i8], [60 x i8]* @b, i32 0, i32 0))
-; CHECK-NEXT: ret i8* [[STPCPY]]
+; CHECK-NEXT: [[STPCPY:%.*]] = tail call ptr @stpcpy(ptr nonnull @a, ptr nonnull @b)
+; CHECK-NEXT: ret ptr [[STPCPY]]
;
- %dst = getelementptr inbounds [60 x i8], [60 x i8]* @a, i32 0, i32 0
- %src = getelementptr inbounds [60 x i8], [60 x i8]* @b, i32 0, i32 0
- %ret = tail call i8* @__stpcpy_chk(i8* %dst, i8* %src, i32 -1)
- ret i8* %ret
+ %ret = tail call ptr @__stpcpy_chk(ptr @a, ptr @b, i32 -1)
+ ret ptr %ret
}
; Check case where slen < strlen (src).
-define i8* @test_no_simplify1() {
+define ptr @test_no_simplify1() {
; CHECK-LABEL: @test_no_simplify1(
-; CHECK-NEXT: [[RET:%.*]] = call i8* @__stpcpy_chk(i8* nonnull getelementptr inbounds ([60 x i8], [60 x i8]* @a, i32 0, i32 0), i8* nonnull getelementptr inbounds ([60 x i8], [60 x i8]* @b, i32 0, i32 0), i32 8)
-; CHECK-NEXT: ret i8* [[RET]]
+; CHECK-NEXT: [[RET:%.*]] = call ptr @__stpcpy_chk(ptr nonnull @a, ptr nonnull @b, i32 8)
+; CHECK-NEXT: ret ptr [[RET]]
;
- %dst = getelementptr inbounds [60 x i8], [60 x i8]* @a, i32 0, i32 0
- %src = getelementptr inbounds [60 x i8], [60 x i8]* @b, i32 0, i32 0
- %ret = call i8* @__stpcpy_chk(i8* %dst, i8* %src, i32 8)
- ret i8* %ret
+ %ret = call ptr @__stpcpy_chk(ptr @a, ptr @b, i32 8)
+ ret ptr %ret
}
-declare i8* @__stpcpy_chk(i8*, i8*, i32) nounwind
-declare i32 @llvm.objectsize.i32.p0i8(i8*, i1, i1, i1) nounwind readonly
+declare ptr @__stpcpy_chk(ptr, ptr, i32) nounwind
+declare i32 @llvm.objectsize.i32.p0(ptr, i1, i1, i1) nounwind readonly