[llvm] 7fbbbfd - [ValueTracking] Convert tests to opaque pointers (NFC)

Nikita Popov via llvm-commits llvm-commits at lists.llvm.org
Fri Dec 16 03:19:29 PST 2022


Author: Nikita Popov
Date: 2022-12-16T12:19:03+01:00
New Revision: 7fbbbfd63899331a47a0265c45f1a2a2ae6918d4

URL: https://github.com/llvm/llvm-project/commit/7fbbbfd63899331a47a0265c45f1a2a2ae6918d4
DIFF: https://github.com/llvm/llvm-project/commit/7fbbbfd63899331a47a0265c45f1a2a2ae6918d4.diff

LOG: [ValueTracking] Convert tests to opaque pointers (NFC)

Added: 
    

Modified: 
    llvm/test/Analysis/ValueTracking/aarch64.irg.ll
    llvm/test/Analysis/ValueTracking/assume-queries-counter.ll
    llvm/test/Analysis/ValueTracking/assume.ll
    llvm/test/Analysis/ValueTracking/deref-abstract-gc.ll
    llvm/test/Analysis/ValueTracking/deref-bitcast-of-gep.ll
    llvm/test/Analysis/ValueTracking/dereferenceable-and-aligned.ll
    llvm/test/Analysis/ValueTracking/func-ptr-lsb.ll
    llvm/test/Analysis/ValueTracking/gep-negative-issue.ll
    llvm/test/Analysis/ValueTracking/get-pointer-base-with-const-off.ll
    llvm/test/Analysis/ValueTracking/invariant.group.ll
    llvm/test/Analysis/ValueTracking/known-bits-from-operator-constexpr.ll
    llvm/test/Analysis/ValueTracking/known-bits-from-range-md.ll
    llvm/test/Analysis/ValueTracking/known-non-equal.ll
    llvm/test/Analysis/ValueTracking/known-nonnull-at.ll
    llvm/test/Analysis/ValueTracking/knownzero-addrspacecast.ll
    llvm/test/Analysis/ValueTracking/knownzero-shift.ll
    llvm/test/Analysis/ValueTracking/memory-dereferenceable.ll
    llvm/test/Analysis/ValueTracking/select-pattern.ll

Removed: 
    


################################################################################
diff  --git a/llvm/test/Analysis/ValueTracking/aarch64.irg.ll b/llvm/test/Analysis/ValueTracking/aarch64.irg.ll
index 9bf81922eb18f..cef2e9edff340 100644
--- a/llvm/test/Analysis/ValueTracking/aarch64.irg.ll
+++ b/llvm/test/Analysis/ValueTracking/aarch64.irg.ll
@@ -2,33 +2,33 @@
 
 ; CHECK-LABEL: define void @checkNonnullIrg()
 define void @checkNonnullIrg() {
-; CHECK:   %[[p:.*]] = call i8* @llvm.aarch64.irg(i8* nonnull
-; CHECK:   call void @use(i8* nonnull %[[p]])
+; CHECK:   %[[p:.*]] = call ptr @llvm.aarch64.irg(ptr nonnull
+; CHECK:   call void @use(ptr nonnull %[[p]])
 entry:
   %0 = alloca i8, align 16
 
-  %p = call i8* @llvm.aarch64.irg(i8* %0, i64 5)
-  call void @use(i8* %p)
+  %p = call ptr @llvm.aarch64.irg(ptr %0, i64 5)
+  call void @use(ptr %p)
 
   ret void
 }
 
 ; CHECK-LABEL: define void @checkNonnullTagp(
-define void @checkNonnullTagp(i8* %tag) {
-; CHECK:  %[[p:.*]] = call i8* @llvm.aarch64.tagp.p0i8(i8* nonnull %a, i8* %tag, i64 1)
-; CHECK:  %[[p2:.*]] = call i8* @llvm.aarch64.tagp.p0i8(i8* nonnull %[[p]], i8* %tag, i64 2)
-; CHECK:  call void @use(i8* nonnull %[[p2]])
+define void @checkNonnullTagp(ptr %tag) {
+; CHECK:  %[[p:.*]] = call ptr @llvm.aarch64.tagp.p0(ptr nonnull %a, ptr %tag, i64 1)
+; CHECK:  %[[p2:.*]] = call ptr @llvm.aarch64.tagp.p0(ptr nonnull %[[p]], ptr %tag, i64 2)
+; CHECK:  call void @use(ptr nonnull %[[p2]])
 entry:
   %a = alloca i8, align 8
 
-  %p = call i8* @llvm.aarch64.tagp.p0i8(i8* %a, i8* %tag, i64 1)
-  %p2 = call i8* @llvm.aarch64.tagp.p0i8(i8* %p, i8* %tag, i64 2)
-  call void @use(i8* %p2)
+  %p = call ptr @llvm.aarch64.tagp.p0(ptr %a, ptr %tag, i64 1)
+  %p2 = call ptr @llvm.aarch64.tagp.p0(ptr %p, ptr %tag, i64 2)
+  call void @use(ptr %p2)
 
   ret void
 }
 
-declare i8* @llvm.aarch64.irg(i8*, i64)
-declare i8* @llvm.aarch64.tagp.p0i8(i8*, i8*, i64)
+declare ptr @llvm.aarch64.irg(ptr, i64)
+declare ptr @llvm.aarch64.tagp.p0(ptr, ptr, i64)
 
-declare void @use(i8*)
+declare void @use(ptr)

diff  --git a/llvm/test/Analysis/ValueTracking/assume-queries-counter.ll b/llvm/test/Analysis/ValueTracking/assume-queries-counter.ll
index 685ae20b2fb78..a7880171daefb 100644
--- a/llvm/test/Analysis/ValueTracking/assume-queries-counter.ll
+++ b/llvm/test/Analysis/ValueTracking/assume-queries-counter.ll
@@ -8,102 +8,102 @@
 declare i1 @get_val()
 declare void @llvm.assume(i1)
 
-define dso_local i1 @test1(i32* readonly %0) {
+define dso_local i1 @test1(ptr readonly %0) {
 ; COUNTER1-LABEL: @test1(
-; COUNTER1-NEXT:    call void @llvm.assume(i1 true) [ "nonnull"(i32* [[TMP0:%.*]]) ]
+; COUNTER1-NEXT:    call void @llvm.assume(i1 true) [ "nonnull"(ptr [[TMP0:%.*]]) ]
 ; COUNTER1-NEXT:    ret i1 false
 ;
 ; COUNTER2-LABEL: @test1(
-; COUNTER2-NEXT:    call void @llvm.assume(i1 true) [ "nonnull"(i32* [[TMP0:%.*]]) ]
-; COUNTER2-NEXT:    [[TMP2:%.*]] = icmp eq i32* [[TMP0]], null
+; COUNTER2-NEXT:    call void @llvm.assume(i1 true) [ "nonnull"(ptr [[TMP0:%.*]]) ]
+; COUNTER2-NEXT:    [[TMP2:%.*]] = icmp eq ptr [[TMP0]], null
 ; COUNTER2-NEXT:    ret i1 [[TMP2]]
 ;
 ; COUNTER3-LABEL: @test1(
-; COUNTER3-NEXT:    call void @llvm.assume(i1 true) [ "nonnull"(i32* [[TMP0:%.*]]) ]
-; COUNTER3-NEXT:    [[TMP2:%.*]] = icmp eq i32* [[TMP0]], null
+; COUNTER3-NEXT:    call void @llvm.assume(i1 true) [ "nonnull"(ptr [[TMP0:%.*]]) ]
+; COUNTER3-NEXT:    [[TMP2:%.*]] = icmp eq ptr [[TMP0]], null
 ; COUNTER3-NEXT:    ret i1 [[TMP2]]
 ;
-  call void @llvm.assume(i1 true) ["nonnull"(i32* %0)]
-  %2 = icmp eq i32* %0, null
+  call void @llvm.assume(i1 true) ["nonnull"(ptr %0)]
+  %2 = icmp eq ptr %0, null
   ret i1 %2
 }
 
-define dso_local i1 @test2(i32* readonly %0) {
+define dso_local i1 @test2(ptr readonly %0) {
 ; COUNTER1-LABEL: @test2(
-; COUNTER1-NEXT:    [[TMP2:%.*]] = icmp eq i32* [[TMP0:%.*]], null
-; COUNTER1-NEXT:    call void @llvm.assume(i1 true) [ "nonnull"(i32* [[TMP0]]) ]
+; COUNTER1-NEXT:    [[TMP2:%.*]] = icmp eq ptr [[TMP0:%.*]], null
+; COUNTER1-NEXT:    call void @llvm.assume(i1 true) [ "nonnull"(ptr [[TMP0]]) ]
 ; COUNTER1-NEXT:    ret i1 [[TMP2]]
 ;
 ; COUNTER2-LABEL: @test2(
-; COUNTER2-NEXT:    call void @llvm.assume(i1 true) [ "nonnull"(i32* [[TMP0:%.*]]) ]
+; COUNTER2-NEXT:    call void @llvm.assume(i1 true) [ "nonnull"(ptr [[TMP0:%.*]]) ]
 ; COUNTER2-NEXT:    ret i1 false
 ;
 ; COUNTER3-LABEL: @test2(
-; COUNTER3-NEXT:    call void @llvm.assume(i1 true) [ "nonnull"(i32* [[TMP0:%.*]]) ]
+; COUNTER3-NEXT:    call void @llvm.assume(i1 true) [ "nonnull"(ptr [[TMP0:%.*]]) ]
 ; COUNTER3-NEXT:    ret i1 false
 ;
-  %2 = icmp eq i32* %0, null
-  call void @llvm.assume(i1 true) ["nonnull"(i32* %0)]
+  %2 = icmp eq ptr %0, null
+  call void @llvm.assume(i1 true) ["nonnull"(ptr %0)]
   ret i1 %2
 }
 
-define dso_local i32 @test4(i32* readonly %0, i1 %cond) {
+define dso_local i32 @test4(ptr readonly %0, i1 %cond) {
 ; COUNTER1-LABEL: @test4(
-; COUNTER1-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[TMP0:%.*]], i32 4) ]
+; COUNTER1-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[TMP0:%.*]], i32 4) ]
 ; COUNTER1-NEXT:    br i1 [[COND:%.*]], label [[A:%.*]], label [[B:%.*]]
 ; COUNTER1:       B:
 ; COUNTER1-NEXT:    br label [[A]]
 ; COUNTER1:       A:
-; COUNTER1-NEXT:    [[TMP2:%.*]] = icmp eq i32* [[TMP0]], null
+; COUNTER1-NEXT:    [[TMP2:%.*]] = icmp eq ptr [[TMP0]], null
 ; COUNTER1-NEXT:    br i1 [[TMP2]], label [[TMP5:%.*]], label [[TMP3:%.*]]
 ; COUNTER1:       3:
-; COUNTER1-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
+; COUNTER1-NEXT:    [[TMP4:%.*]] = load i32, ptr [[TMP0]], align 4
 ; COUNTER1-NEXT:    br label [[TMP5]]
 ; COUNTER1:       5:
 ; COUNTER1-NEXT:    [[TMP6:%.*]] = phi i32 [ [[TMP4]], [[TMP3]] ], [ 0, [[A]] ]
 ; COUNTER1-NEXT:    ret i32 [[TMP6]]
 ;
 ; COUNTER2-LABEL: @test4(
-; COUNTER2-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[TMP0:%.*]], i32 4) ]
+; COUNTER2-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[TMP0:%.*]], i32 4) ]
 ; COUNTER2-NEXT:    br i1 [[COND:%.*]], label [[A:%.*]], label [[B:%.*]]
 ; COUNTER2:       B:
 ; COUNTER2-NEXT:    br label [[A]]
 ; COUNTER2:       A:
-; COUNTER2-NEXT:    [[TMP2:%.*]] = icmp eq i32* [[TMP0]], null
+; COUNTER2-NEXT:    [[TMP2:%.*]] = icmp eq ptr [[TMP0]], null
 ; COUNTER2-NEXT:    br i1 [[TMP2]], label [[TMP5:%.*]], label [[TMP3:%.*]]
 ; COUNTER2:       3:
-; COUNTER2-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
+; COUNTER2-NEXT:    [[TMP4:%.*]] = load i32, ptr [[TMP0]], align 4
 ; COUNTER2-NEXT:    br label [[TMP5]]
 ; COUNTER2:       5:
 ; COUNTER2-NEXT:    [[TMP6:%.*]] = phi i32 [ [[TMP4]], [[TMP3]] ], [ 0, [[A]] ]
 ; COUNTER2-NEXT:    ret i32 [[TMP6]]
 ;
 ; COUNTER3-LABEL: @test4(
-; COUNTER3-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[TMP0:%.*]], i32 4) ]
+; COUNTER3-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[TMP0:%.*]], i32 4) ]
 ; COUNTER3-NEXT:    br i1 [[COND:%.*]], label [[A:%.*]], label [[B:%.*]]
 ; COUNTER3:       B:
 ; COUNTER3-NEXT:    br label [[A]]
 ; COUNTER3:       A:
 ; COUNTER3-NEXT:    br i1 false, label [[TMP4:%.*]], label [[TMP2:%.*]]
 ; COUNTER3:       2:
-; COUNTER3-NEXT:    [[TMP3:%.*]] = load i32, i32* [[TMP0]], align 4
+; COUNTER3-NEXT:    [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
 ; COUNTER3-NEXT:    br label [[TMP4]]
 ; COUNTER3:       4:
 ; COUNTER3-NEXT:    [[TMP5:%.*]] = phi i32 [ [[TMP3]], [[TMP2]] ], [ 0, [[A]] ]
 ; COUNTER3-NEXT:    ret i32 [[TMP5]]
 ;
-  call void @llvm.assume(i1 true) ["dereferenceable"(i32* %0, i32 4)]
+  call void @llvm.assume(i1 true) ["dereferenceable"(ptr %0, i32 4)]
   br i1 %cond, label %A, label %B
 
 B:
   br label %A
 
 A:
-  %2 = icmp eq i32* %0, null
+  %2 = icmp eq ptr %0, null
   br i1 %2, label %5, label %3
 
 3:                                                ; preds = %1
-  %4 = load i32, i32* %0, align 4
+  %4 = load i32, ptr %0, align 4
   br label %5
 
 5:                                                ; preds = %1, %3

diff  --git a/llvm/test/Analysis/ValueTracking/assume.ll b/llvm/test/Analysis/ValueTracking/assume.ll
index c71d51a7410e9..81c37227e939b 100644
--- a/llvm/test/Analysis/ValueTracking/assume.ll
+++ b/llvm/test/Analysis/ValueTracking/assume.ll
@@ -37,53 +37,53 @@ entry-block:
 declare i1 @get_val()
 declare void @llvm.assume(i1)
 
-define dso_local i1 @test1(i32* readonly %0) {
+define dso_local i1 @test1(ptr readonly %0) {
 ; CHECK-LABEL: @test1(
-; CHECK-NEXT:    call void @llvm.assume(i1 true) [ "nonnull"(i32* [[TMP0:%.*]]) ]
+; CHECK-NEXT:    call void @llvm.assume(i1 true) [ "nonnull"(ptr [[TMP0:%.*]]) ]
 ; CHECK-NEXT:    ret i1 false
 ;
-  call void @llvm.assume(i1 true) ["nonnull"(i32* %0)]
-  %2 = icmp eq i32* %0, null
+  call void @llvm.assume(i1 true) ["nonnull"(ptr %0)]
+  %2 = icmp eq ptr %0, null
   ret i1 %2
 }
 
-define dso_local i1 @test2(i32* readonly %0) {
+define dso_local i1 @test2(ptr readonly %0) {
 ; CHECK-LABEL: @test2(
-; CHECK-NEXT:    call void @llvm.assume(i1 true) [ "nonnull"(i32* [[TMP0:%.*]]) ]
+; CHECK-NEXT:    call void @llvm.assume(i1 true) [ "nonnull"(ptr [[TMP0:%.*]]) ]
 ; CHECK-NEXT:    ret i1 false
 ;
-  %2 = icmp eq i32* %0, null
-  call void @llvm.assume(i1 true) ["nonnull"(i32* %0)]
+  %2 = icmp eq ptr %0, null
+  call void @llvm.assume(i1 true) ["nonnull"(ptr %0)]
   ret i1 %2
 }
 
-define dso_local i32 @test4(i32* readonly %0, i1 %cond) {
+define dso_local i32 @test4(ptr readonly %0, i1 %cond) {
 ; CHECK-LABEL: @test4(
-; CHECK-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[TMP0:%.*]], i32 4) ]
+; CHECK-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[TMP0:%.*]], i32 4) ]
 ; CHECK-NEXT:    br i1 [[COND:%.*]], label [[A:%.*]], label [[B:%.*]]
 ; CHECK:       B:
 ; CHECK-NEXT:    br label [[A]]
 ; CHECK:       A:
 ; CHECK-NEXT:    br i1 false, label [[TMP4:%.*]], label [[TMP2:%.*]]
 ; CHECK:       2:
-; CHECK-NEXT:    [[TMP3:%.*]] = load i32, i32* [[TMP0]], align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
 ; CHECK-NEXT:    br label [[TMP4]]
 ; CHECK:       4:
 ; CHECK-NEXT:    [[TMP5:%.*]] = phi i32 [ [[TMP3]], [[TMP2]] ], [ 0, [[A]] ]
 ; CHECK-NEXT:    ret i32 [[TMP5]]
 ;
-  call void @llvm.assume(i1 true) ["dereferenceable"(i32* %0, i32 4)]
+  call void @llvm.assume(i1 true) ["dereferenceable"(ptr %0, i32 4)]
   br i1 %cond, label %A, label %B
 
 B:
   br label %A
 
 A:
-  %2 = icmp eq i32* %0, null
+  %2 = icmp eq ptr %0, null
   br i1 %2, label %5, label %3
 
 3:                                                ; preds = %1
-  %4 = load i32, i32* %0, align 4
+  %4 = load i32, ptr %0, align 4
   br label %5
 
 5:                                                ; preds = %1, %3
@@ -91,33 +91,33 @@ A:
   ret i32 %6
 }
 
-define dso_local i32 @test4a(i32* readonly %0, i1 %cond) {
+define dso_local i32 @test4a(ptr readonly %0, i1 %cond) {
 ; CHECK-LABEL: @test4a(
-; CHECK-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[TMP0:%.*]], i32 4), "align"(i32* [[TMP0]], i32 8) ]
+; CHECK-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[TMP0:%.*]], i32 4), "align"(ptr [[TMP0]], i32 8) ]
 ; CHECK-NEXT:    br i1 [[COND:%.*]], label [[A:%.*]], label [[B:%.*]]
 ; CHECK:       B:
 ; CHECK-NEXT:    br label [[A]]
 ; CHECK:       A:
 ; CHECK-NEXT:    br i1 false, label [[TMP4:%.*]], label [[TMP2:%.*]]
 ; CHECK:       2:
-; CHECK-NEXT:    [[TMP3:%.*]] = load i32, i32* [[TMP0]], align 8
+; CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 8
 ; CHECK-NEXT:    br label [[TMP4]]
 ; CHECK:       4:
 ; CHECK-NEXT:    [[TMP5:%.*]] = phi i32 [ [[TMP3]], [[TMP2]] ], [ 0, [[A]] ]
 ; CHECK-NEXT:    ret i32 [[TMP5]]
 ;
-  call void @llvm.assume(i1 true) ["dereferenceable"(i32* %0, i32 4), "align"(i32* %0, i32 8)]
+  call void @llvm.assume(i1 true) ["dereferenceable"(ptr %0, i32 4), "align"(ptr %0, i32 8)]
   br i1 %cond, label %A, label %B
 
 B:
   br label %A
 
 A:
-  %2 = icmp eq i32* %0, null
+  %2 = icmp eq ptr %0, null
   br i1 %2, label %5, label %3
 
 3:                                                ; preds = %1
-  %4 = load i32, i32* %0, align 4
+  %4 = load i32, ptr %0, align 4
   br label %5
 
 5:                                                ; preds = %1, %3
@@ -125,34 +125,34 @@ A:
   ret i32 %6
 }
 
-define dso_local i32 @test4b(i32* readonly %0, i1 %cond) null_pointer_is_valid {
+define dso_local i32 @test4b(ptr readonly %0, i1 %cond) null_pointer_is_valid {
 ; CHECK-LABEL: @test4b(
-; CHECK-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[TMP0:%.*]], i32 4) ]
+; CHECK-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[TMP0:%.*]], i32 4) ]
 ; CHECK-NEXT:    br i1 [[COND:%.*]], label [[A:%.*]], label [[B:%.*]]
 ; CHECK:       B:
 ; CHECK-NEXT:    br label [[A]]
 ; CHECK:       A:
-; CHECK-NEXT:    [[TMP2:%.*]] = icmp eq i32* [[TMP0]], null
+; CHECK-NEXT:    [[TMP2:%.*]] = icmp eq ptr [[TMP0]], null
 ; CHECK-NEXT:    br i1 [[TMP2]], label [[TMP5:%.*]], label [[TMP3:%.*]]
 ; CHECK:       3:
-; CHECK-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
+; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr [[TMP0]], align 4
 ; CHECK-NEXT:    br label [[TMP5]]
 ; CHECK:       5:
 ; CHECK-NEXT:    [[TMP6:%.*]] = phi i32 [ [[TMP4]], [[TMP3]] ], [ 0, [[A]] ]
 ; CHECK-NEXT:    ret i32 [[TMP6]]
 ;
-  call void @llvm.assume(i1 true) ["dereferenceable"(i32* %0, i32 4)]
+  call void @llvm.assume(i1 true) ["dereferenceable"(ptr %0, i32 4)]
   br i1 %cond, label %A, label %B
 
 B:
   br label %A
 
 A:
-  %2 = icmp eq i32* %0, null
+  %2 = icmp eq ptr %0, null
   br i1 %2, label %5, label %3
 
 3:                                                ; preds = %1
-  %4 = load i32, i32* %0, align 4
+  %4 = load i32, ptr %0, align 4
   br label %5
 
 5:                                                ; preds = %1, %3

diff  --git a/llvm/test/Analysis/ValueTracking/deref-abstract-gc.ll b/llvm/test/Analysis/ValueTracking/deref-abstract-gc.ll
index 985377c93d6e0..1efe4a90ea780 100644
--- a/llvm/test/Analysis/ValueTracking/deref-abstract-gc.ll
+++ b/llvm/test/Analysis/ValueTracking/deref-abstract-gc.ll
@@ -9,13 +9,13 @@ target datalayout = "e-i32:32:64"
 ; CHECK: %gc_ptr
 ; CHECK-NOT: %other_ptr
 ; FIXME: Can infer the gc pointer case
-define void @abstract_model(i32 addrspace(1)* dereferenceable(8) %gc_ptr,
-                            i32* dereferenceable(8) %other_ptr)
+define void @abstract_model(ptr addrspace(1) dereferenceable(8) %gc_ptr,
+                            ptr dereferenceable(8) %other_ptr)
     gc "statepoint-example" {
 entry:
   call void @mayfree()
-  load i32, i32 addrspace(1)* %gc_ptr
-  load i32, i32* %other_ptr
+  load i32, ptr addrspace(1) %gc_ptr
+  load i32, ptr %other_ptr
   ret void
 }
 

diff  --git a/llvm/test/Analysis/ValueTracking/deref-bitcast-of-gep.ll b/llvm/test/Analysis/ValueTracking/deref-bitcast-of-gep.ll
index fbd12f609b49f..672e7da39ee26 100644
--- a/llvm/test/Analysis/ValueTracking/deref-bitcast-of-gep.ll
+++ b/llvm/test/Analysis/ValueTracking/deref-bitcast-of-gep.ll
@@ -9,12 +9,11 @@
 
 declare void @use(i32)
 
-define void @f_0(i8* align 4 dereferenceable(1024) %ptr) nofree nosync {
+define void @f_0(ptr align 4 dereferenceable(1024) %ptr) nofree nosync {
 ; CHECK-LABEL: @f_0(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[PTR_GEP:%.*]] = getelementptr i8, i8* [[PTR:%.*]], i32 32
-; CHECK-NEXT:    [[PTR_I32:%.*]] = bitcast i8* [[PTR_GEP]] to i32*
-; CHECK-NEXT:    [[VAL:%.*]] = load i32, i32* [[PTR_I32]], align 4
+; CHECK-NEXT:    [[PTR_GEP:%.*]] = getelementptr i8, ptr [[PTR:%.*]], i32 32
+; CHECK-NEXT:    [[VAL:%.*]] = load i32, ptr [[PTR_GEP]], align 4
 ; CHECK-NEXT:    br label [[LOOP:%.*]]
 ; CHECK:       loop:
 ; CHECK-NEXT:    call void @use(i32 0)
@@ -24,26 +23,24 @@ define void @f_0(i8* align 4 dereferenceable(1024) %ptr) nofree nosync {
 
 
 entry:
-  %ptr.gep = getelementptr i8, i8* %ptr, i32 32
-  %ptr.i32 = bitcast i8* %ptr.gep to i32*
+  %ptr.gep = getelementptr i8, ptr %ptr, i32 32
   br label %loop
 
 loop:
   call void @use(i32 0)
-  %val = load i32, i32* %ptr.i32, !invariant.load !{}
+  %val = load i32, ptr %ptr.gep, !invariant.load !{}
   call void @use(i32 %val)
   br label %loop
 }
 
-define void @f_1(i8* align 4 dereferenceable_or_null(1024) %ptr) nofree nosync {
+define void @f_1(ptr align 4 dereferenceable_or_null(1024) %ptr) nofree nosync {
 ; CHECK-LABEL: @f_1(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[PTR_GEP:%.*]] = getelementptr i8, i8* [[PTR:%.*]], i32 32
-; CHECK-NEXT:    [[PTR_I32:%.*]] = bitcast i8* [[PTR_GEP]] to i32*
-; CHECK-NEXT:    [[PTR_IS_NULL:%.*]] = icmp eq i8* [[PTR]], null
+; CHECK-NEXT:    [[PTR_GEP:%.*]] = getelementptr i8, ptr [[PTR:%.*]], i32 32
+; CHECK-NEXT:    [[PTR_IS_NULL:%.*]] = icmp eq ptr [[PTR]], null
 ; CHECK-NEXT:    br i1 [[PTR_IS_NULL]], label [[LEAVE:%.*]], label [[LOOP_PREHEADER:%.*]]
 ; CHECK:       loop.preheader:
-; CHECK-NEXT:    [[VAL:%.*]] = load i32, i32* [[PTR_I32]], align 4
+; CHECK-NEXT:    [[VAL:%.*]] = load i32, ptr [[PTR_GEP]], align 4
 ; CHECK-NEXT:    br label [[LOOP:%.*]]
 ; CHECK:       loop:
 ; CHECK-NEXT:    call void @use(i32 0)
@@ -53,15 +50,14 @@ define void @f_1(i8* align 4 dereferenceable_or_null(1024) %ptr) nofree nosync {
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  %ptr.gep = getelementptr i8, i8* %ptr, i32 32
-  %ptr.i32 = bitcast i8* %ptr.gep to i32*
-  %ptr_is_null = icmp eq i8* %ptr, null
+  %ptr.gep = getelementptr i8, ptr %ptr, i32 32
+  %ptr_is_null = icmp eq ptr %ptr, null
   br i1 %ptr_is_null, label %leave, label %loop
 
 
 loop:
   call void @use(i32 0)
-  %val = load i32, i32* %ptr.i32, !invariant.load !{}
+  %val = load i32, ptr %ptr.gep, !invariant.load !{}
   call void @use(i32 %val)
   br label %loop
 
@@ -69,18 +65,17 @@ leave:
   ret void
 }
 
-define void @f_2(i8* align 4 dereferenceable_or_null(1024) %ptr) {
+define void @f_2(ptr align 4 dereferenceable_or_null(1024) %ptr) {
 ; CHECK-LABEL: @f_2(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[PTR_GEP:%.*]] = getelementptr i8, i8* [[PTR:%.*]], i32 30
-; CHECK-NEXT:    [[PTR_I32:%.*]] = bitcast i8* [[PTR_GEP]] to i32*
-; CHECK-NEXT:    [[PTR_IS_NULL:%.*]] = icmp eq i8* [[PTR]], null
+; CHECK-NEXT:    [[PTR_GEP:%.*]] = getelementptr i8, ptr [[PTR:%.*]], i32 30
+; CHECK-NEXT:    [[PTR_IS_NULL:%.*]] = icmp eq ptr [[PTR]], null
 ; CHECK-NEXT:    br i1 [[PTR_IS_NULL]], label [[LEAVE:%.*]], label [[LOOP_PREHEADER:%.*]]
 ; CHECK:       loop.preheader:
 ; CHECK-NEXT:    br label [[LOOP:%.*]]
 ; CHECK:       loop:
 ; CHECK-NEXT:    call void @use(i32 0)
-; CHECK-NEXT:    [[VAL:%.*]] = load i32, i32* [[PTR_I32]], align 4, !invariant.load !0
+; CHECK-NEXT:    [[VAL:%.*]] = load i32, ptr [[PTR_GEP]], align 4, !invariant.load !0
 ; CHECK-NEXT:    call void @use(i32 [[VAL]])
 ; CHECK-NEXT:    br label [[LOOP]]
 ; CHECK:       leave:
@@ -91,14 +86,13 @@ entry:
   ;; Can't hoist, since the alignment does not work out -- (<4 byte
   ;; aligned> + 30) is not necessarily 4 byte aligned.
 
-  %ptr.gep = getelementptr i8, i8* %ptr, i32 30
-  %ptr.i32 = bitcast i8* %ptr.gep to i32*
-  %ptr_is_null = icmp eq i8* %ptr, null
+  %ptr.gep = getelementptr i8, ptr %ptr, i32 30
+  %ptr_is_null = icmp eq ptr %ptr, null
   br i1 %ptr_is_null, label %leave, label %loop
 
 loop:
   call void @use(i32 0)
-  %val = load i32, i32* %ptr.i32, !invariant.load !{}
+  %val = load i32, ptr %ptr.gep, !invariant.load !{}
   call void @use(i32 %val)
   br label %loop
 
@@ -106,11 +100,11 @@ leave:
   ret void
 }
 
-define void @checkLaunder(i8* align 4 dereferenceable(1024) %p) nofree nosync {
+define void @checkLaunder(ptr align 4 dereferenceable(1024) %p) nofree nosync {
 ; CHECK-LABEL: @checkLaunder(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[L:%.*]] = call i8* @llvm.launder.invariant.group.p0i8(i8* [[P:%.*]])
-; CHECK-NEXT:    [[VAL:%.*]] = load i8, i8* [[L]], align 1
+; CHECK-NEXT:    [[L:%.*]] = call ptr @llvm.launder.invariant.group.p0(ptr [[P:%.*]])
+; CHECK-NEXT:    [[VAL:%.*]] = load i8, ptr [[L]], align 1
 ; CHECK-NEXT:    br label [[LOOP:%.*]]
 ; CHECK:       loop:
 ; CHECK-NEXT:    call void @use(i32 0)
@@ -119,16 +113,16 @@ define void @checkLaunder(i8* align 4 dereferenceable(1024) %p) nofree nosync {
 ;
 
 entry:
-  %l = call i8* @llvm.launder.invariant.group.p0i8(i8* %p)
+  %l = call ptr @llvm.launder.invariant.group.p0(ptr %p)
   br label %loop
 
 loop:
   call void @use(i32 0)
-  %val = load i8, i8* %l, !invariant.load !{}
+  %val = load i8, ptr %l, !invariant.load !{}
   call void @use8(i8 %val)
   br label %loop
 }
 
-declare i8* @llvm.launder.invariant.group.p0i8(i8*)
+declare ptr @llvm.launder.invariant.group.p0(ptr)
 
 declare void @use8(i8)

diff  --git a/llvm/test/Analysis/ValueTracking/dereferenceable-and-aligned.ll b/llvm/test/Analysis/ValueTracking/dereferenceable-and-aligned.ll
index a84f0c81fc086..77b26b2948b9b 100644
--- a/llvm/test/Analysis/ValueTracking/dereferenceable-and-aligned.ll
+++ b/llvm/test/Analysis/ValueTracking/dereferenceable-and-aligned.ll
@@ -11,11 +11,10 @@ bb:
   br label %bb1
 
 bb1:
-  %tmp2 = getelementptr inbounds [256 x i32], [256 x i32]* %tmp, i32 0, i32 36
-  %tmp3 = bitcast i32* %tmp2 to <4 x i32>*
-  %tmp4 = addrspacecast <4 x i32>* %tmp3 to <4 x i32> addrspace(4)*
-  %tmp5 = load <4 x i32>, <4 x i32> addrspace(4)* %tmp4
+  %tmp2 = getelementptr inbounds [256 x i32], ptr %tmp, i32 0, i32 36
+  %tmp4 = addrspacecast ptr %tmp2 to ptr addrspace(4)
+  %tmp5 = load <4 x i32>, ptr addrspace(4) %tmp4
   %tmp6 = xor <4 x i32> %tmp5, undef
-  store <4 x i32> %tmp6, <4 x i32> addrspace(1)* undef
+  store <4 x i32> %tmp6, ptr addrspace(1) undef
   br label %bb1
 }

diff  --git a/llvm/test/Analysis/ValueTracking/func-ptr-lsb.ll b/llvm/test/Analysis/ValueTracking/func-ptr-lsb.ll
index a64167fb05177..557e1e12fd72c 100644
--- a/llvm/test/Analysis/ValueTracking/func-ptr-lsb.ll
+++ b/llvm/test/Analysis/ValueTracking/func-ptr-lsb.ll
@@ -9,7 +9,7 @@ entry:
   ; Even though the address of @foo is aligned, we cannot assume that the
   ; pointer has the same alignment. This is not true for e.g. ARM targets
   ; which store ARM/Thumb state in the LSB
-  ret i32 and (i32 ptrtoint (void ()* @foo to i32), i32 -4)
+  ret i32 and (i32 ptrtoint (ptr @foo to i32), i32 -4)
 }
 
 define internal void @foo() align 16 {

diff  --git a/llvm/test/Analysis/ValueTracking/gep-negative-issue.ll b/llvm/test/Analysis/ValueTracking/gep-negative-issue.ll
index 90510aab384e6..98b6fa7958f0c 100644
--- a/llvm/test/Analysis/ValueTracking/gep-negative-issue.ll
+++ b/llvm/test/Analysis/ValueTracking/gep-negative-issue.ll
@@ -2,43 +2,43 @@
 target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128-p100:128:64:64-p101:128:64:64"
 target triple = "x86_64-unknown-linux-gnu"
 
-%ArrayImpl = type { i64, i64 addrspace(100)*, [1 x i64], [1 x i64], [1 x i64], i64, i64, double addrspace(100)*, double addrspace(100)*, i8, i64 }
-%_array = type { i64, %ArrayImpl addrspace(100)*, i8 }
+%ArrayImpl = type { i64, ptr addrspace(100), [1 x i64], [1 x i64], [1 x i64], i64, i64, ptr addrspace(100), ptr addrspace(100), i8, i64 }
+%_array = type { i64, ptr addrspace(100), i8 }
 
 define void @test(i64 %n_chpl) {
 entry:
   ; First section is some code
-  %0 = getelementptr inbounds %_array, %_array* null, i32 0, i32 1
-  %1 = load %ArrayImpl addrspace(100)*, %ArrayImpl addrspace(100)** %0
-  %2 = getelementptr inbounds %ArrayImpl, %ArrayImpl addrspace(100)* %1, i32 0, i32 8
-  %3 = load double addrspace(100)*, double addrspace(100)* addrspace(100)* %2
-  %4 = getelementptr inbounds double, double addrspace(100)* %3, i64 -1
+  %0 = getelementptr inbounds %_array, ptr null, i32 0, i32 1
+  %1 = load ptr addrspace(100), ptr %0
+  %2 = getelementptr inbounds %ArrayImpl, ptr addrspace(100) %1, i32 0, i32 8
+  %3 = load ptr addrspace(100), ptr addrspace(100) %2
+  %4 = getelementptr inbounds double, ptr addrspace(100) %3, i64 -1
   ; Second section is that code repeated
-  %x0 = getelementptr inbounds %_array, %_array* null, i32 0, i32 1
-  %x1 = load %ArrayImpl addrspace(100)*, %ArrayImpl addrspace(100)** %x0
-  %x2 = getelementptr inbounds %ArrayImpl, %ArrayImpl addrspace(100)* %x1, i32 0, i32 8
-  %x3 = load double addrspace(100)*, double addrspace(100)* addrspace(100)* %x2
-  %x4 = getelementptr inbounds double, double addrspace(100)* %x3, i64 -1
+  %x0 = getelementptr inbounds %_array, ptr null, i32 0, i32 1
+  %x1 = load ptr addrspace(100), ptr %x0
+  %x2 = getelementptr inbounds %ArrayImpl, ptr addrspace(100) %x1, i32 0, i32 8
+  %x3 = load ptr addrspace(100), ptr addrspace(100) %x2
+  %x4 = getelementptr inbounds double, ptr addrspace(100) %x3, i64 -1
   ; These two stores refer to the same memory location
   ; Even so, they are expected to remain separate stores here
-  store double 0.000000e+00, double addrspace(100)* %4
-  store double 0.000000e+00, double addrspace(100)* %x4
+  store double 0.000000e+00, ptr addrspace(100) %4
+  store double 0.000000e+00, ptr addrspace(100) %x4
   ; Third section is the repeated code again, with a later store
   ; This third section is necessary to trigger the crash
-  %y1 = load %ArrayImpl addrspace(100)*, %ArrayImpl addrspace(100)** %0
-  %y2 = getelementptr inbounds %ArrayImpl, %ArrayImpl addrspace(100)* %y1, i32 0, i32 8
-  %y3 = load double addrspace(100)*, double addrspace(100)* addrspace(100)* %y2
-  %y4 = getelementptr inbounds double, double addrspace(100)* %y3, i64 -1
-  store double 0.000000e+00, double addrspace(100)* %y4
+  %y1 = load ptr addrspace(100), ptr %0
+  %y2 = getelementptr inbounds %ArrayImpl, ptr addrspace(100) %y1, i32 0, i32 8
+  %y3 = load ptr addrspace(100), ptr addrspace(100) %y2
+  %y4 = getelementptr inbounds double, ptr addrspace(100) %y3, i64 -1
+  store double 0.000000e+00, ptr addrspace(100) %y4
   ret void
 ; CHECK-LABEL: define void @test
-; CHECK: getelementptr inbounds double, double addrspace(100)* {{%.*}}, i64 -1
-; CHECK-NEXT: store double 0.000000e+00, double addrspace(100)* [[DST:%.*]]
-; CHECK-NEXT: store double 0.000000e+00, double addrspace(100)* [[DST]]
+; CHECK: getelementptr inbounds double, ptr addrspace(100) {{%.*}}, i64 -1
+; CHECK-NEXT: store double 0.000000e+00, ptr addrspace(100) [[DST:%.*]]
+; CHECK-NEXT: store double 0.000000e+00, ptr addrspace(100) [[DST]]
 ; CHECK: load
-; CHECK: getelementptr inbounds %ArrayImpl, %ArrayImpl addrspace(100)*
+; CHECK: getelementptr inbounds %ArrayImpl, ptr addrspace(100)
 ; CHECK: load
-; CHECK: getelementptr inbounds double, double addrspace(100)* {{%.*}}, i64 -1
-; CHECK: store double 0.000000e+00, double addrspace(100)*
+; CHECK: getelementptr inbounds double, ptr addrspace(100) {{%.*}}, i64 -1
+; CHECK: store double 0.000000e+00, ptr addrspace(100)
 ; CHECK: ret
 }

diff  --git a/llvm/test/Analysis/ValueTracking/get-pointer-base-with-const-off.ll b/llvm/test/Analysis/ValueTracking/get-pointer-base-with-const-off.ll
index dbe37fdcc1b87..06aa0f5ccca7a 100644
--- a/llvm/test/Analysis/ValueTracking/get-pointer-base-with-const-off.ll
+++ b/llvm/test/Analysis/ValueTracking/get-pointer-base-with-const-off.ll
@@ -8,19 +8,15 @@ target datalayout = "e-p:32:32-p4:64:64"
 define i32 @addrspacecast-crash() {
 ; CHECK-LABEL: @addrspacecast-crash
 ; CHECK: %tmp = alloca [25 x i64]
-; CHECK: %tmp1 = getelementptr inbounds [25 x i64], [25 x i64]* %tmp, i32 0, i32 0
-; CHECK: %tmp2 = addrspacecast i64* %tmp1 to <8 x i64> addrspace(4)*
-; CHECK: store <8 x i64> zeroinitializer, <8 x i64> addrspace(4)* %tmp2
+; CHECK: %tmp2 = addrspacecast ptr %tmp to ptr addrspace(4)
+; CHECK: store <8 x i64> zeroinitializer, ptr addrspace(4) %tmp2
 ; CHECK-NOT: load
 bb:
   %tmp = alloca [25 x i64]
-  %tmp1 = getelementptr inbounds [25 x i64], [25 x i64]* %tmp, i32 0, i32 0
-  %tmp2 = addrspacecast i64* %tmp1 to <8 x i64> addrspace(4)*
-  %tmp3 = getelementptr inbounds <8 x i64>, <8 x i64> addrspace(4)* %tmp2, i64 0
-  store <8 x i64> zeroinitializer, <8 x i64> addrspace(4)* %tmp3
-  %tmp4 = getelementptr inbounds [25 x i64], [25 x i64]* %tmp, i32 0, i32 0
-  %tmp5 = addrspacecast i64* %tmp4 to i32 addrspace(4)*
-  %tmp6 = getelementptr inbounds i32, i32 addrspace(4)* %tmp5, i64 10
-  %tmp7 = load i32, i32 addrspace(4)* %tmp6, align 4
+  %tmp2 = addrspacecast ptr %tmp to ptr addrspace(4)
+  store <8 x i64> zeroinitializer, ptr addrspace(4) %tmp2
+  %tmp5 = addrspacecast ptr %tmp to ptr addrspace(4)
+  %tmp6 = getelementptr inbounds i32, ptr addrspace(4) %tmp5, i64 10
+  %tmp7 = load i32, ptr addrspace(4) %tmp6, align 4
   ret i32 %tmp7
 }

diff  --git a/llvm/test/Analysis/ValueTracking/invariant.group.ll b/llvm/test/Analysis/ValueTracking/invariant.group.ll
index edfe164dc565c..3d699a75b7f4c 100644
--- a/llvm/test/Analysis/ValueTracking/invariant.group.ll
+++ b/llvm/test/Analysis/ValueTracking/invariant.group.ll
@@ -2,33 +2,33 @@
 
 ; CHECK-LABEL: define void @checkNonnullLaunder()
 define void @checkNonnullLaunder() {
-; CHECK:   %[[p:.*]] = call i8* @llvm.launder.invariant.group.p0i8(i8* nonnull %0)
-; CHECK:   call void @use(i8* nonnull %[[p]])
+; CHECK:   %[[p:.*]] = call ptr @llvm.launder.invariant.group.p0(ptr nonnull %0)
+; CHECK:   call void @use(ptr nonnull %[[p]])
 entry:
   %0 = alloca i8, align 8
 
-  %p = call i8* @llvm.launder.invariant.group.p0i8(i8* %0)
-  %p2 = call i8* @llvm.launder.invariant.group.p0i8(i8* %p)
-  call void @use(i8* %p2)
+  %p = call ptr @llvm.launder.invariant.group.p0(ptr %0)
+  %p2 = call ptr @llvm.launder.invariant.group.p0(ptr %p)
+  call void @use(ptr %p2)
 
   ret void
 }
 
 ; CHECK-LABEL: define void @checkNonnullStrip()
 define void @checkNonnullStrip() {
-; CHECK:   %[[p:.*]] = call i8* @llvm.strip.invariant.group.p0i8(i8* nonnull %0)
-; CHECK:   call void @use(i8* nonnull %[[p]])
+; CHECK:   %[[p:.*]] = call ptr @llvm.strip.invariant.group.p0(ptr nonnull %0)
+; CHECK:   call void @use(ptr nonnull %[[p]])
 entry:
   %0 = alloca i8, align 8
 
-  %p = call i8* @llvm.strip.invariant.group.p0i8(i8* %0)
-  %p2 = call i8* @llvm.strip.invariant.group.p0i8(i8* %p)
-  call void @use(i8* %p2)
+  %p = call ptr @llvm.strip.invariant.group.p0(ptr %0)
+  %p2 = call ptr @llvm.strip.invariant.group.p0(ptr %p)
+  call void @use(ptr %p2)
 
   ret void
 }
 
-declare i8* @llvm.launder.invariant.group.p0i8(i8*)
-declare i8* @llvm.strip.invariant.group.p0i8(i8*)
+declare ptr @llvm.launder.invariant.group.p0(ptr)
+declare ptr @llvm.strip.invariant.group.p0(ptr)
 
-declare void @use(i8*)
+declare void @use(ptr)

diff  --git a/llvm/test/Analysis/ValueTracking/known-bits-from-operator-constexpr.ll b/llvm/test/Analysis/ValueTracking/known-bits-from-operator-constexpr.ll
index d6832581fcfe3..e3e30e052ee58 100644
--- a/llvm/test/Analysis/ValueTracking/known-bits-from-operator-constexpr.ll
+++ b/llvm/test/Analysis/ValueTracking/known-bits-from-operator-constexpr.ll
@@ -7,9 +7,9 @@
 @g = global [21 x i32] zeroinitializer
 define i32 @test1(i32 %a) {
 ; CHECK-LABEL: @test1(
-; CHECK-NEXT:    [[T:%.*]] = sub i32 [[A:%.*]], extractelement (<4 x i32> ptrtoint (<4 x i32*> getelementptr inbounds ([21 x i32], [21 x i32]* @g, <4 x i32> zeroinitializer, <4 x i32> <i32 1, i32 2, i32 3, i32 17>) to <4 x i32>), i32 3)
+; CHECK-NEXT:    [[T:%.*]] = sub i32 [[A:%.*]], extractelement (<4 x i32> ptrtoint (<4 x ptr> getelementptr inbounds ([21 x i32], ptr @g, <4 x i32> zeroinitializer, <4 x i32> <i32 1, i32 2, i32 3, i32 17>) to <4 x i32>), i32 3)
 ; CHECK-NEXT:    ret i32 [[T]]
 ;
-  %t = sub i32 %a, extractelement (<4 x i32> ptrtoint (<4 x i32 *> getelementptr inbounds ([21 x i32], [21 x i32] * @g, <4 x i32> zeroinitializer, <4 x i32> <i32 1, i32 2, i32 3, i32 17>) to <4 x i32>), i32 3)
+  %t = sub i32 %a, extractelement (<4 x i32> ptrtoint (<4 x ptr> getelementptr inbounds ([21 x i32], ptr @g, <4 x i32> zeroinitializer, <4 x i32> <i32 1, i32 2, i32 3, i32 17>) to <4 x i32>), i32 3)
   ret i32 %t
 }

diff --git a/llvm/test/Analysis/ValueTracking/known-bits-from-range-md.ll b/llvm/test/Analysis/ValueTracking/known-bits-from-range-md.ll
index d705d49aca010..2c9ffb77844e4 100644
--- a/llvm/test/Analysis/ValueTracking/known-bits-from-range-md.ll
+++ b/llvm/test/Analysis/ValueTracking/known-bits-from-range-md.ll
@@ -1,33 +1,33 @@
 ; RUN: opt -S -passes=instsimplify,instcombine < %s | FileCheck %s
 
-define i1 @test0(i8* %ptr) {
+define i1 @test0(ptr %ptr) {
 ; CHECK-LABEL: @test0(
  entry:
-  %val = load i8, i8* %ptr, !range !{i8 -50, i8 0}
+  %val = load i8, ptr %ptr, !range !{i8 -50, i8 0}
   %and = and i8 %val, 128
   %is.eq = icmp eq i8 %and, 128
   ret i1 %is.eq
 ; CHECK: ret i1 true
 }
 
-define i1 @test1(i8* %ptr) {
+define i1 @test1(ptr %ptr) {
 ; CHECK-LABEL: @test1(
  entry:
-  %val = load i8, i8* %ptr, !range !{i8 64, i8 128}
+  %val = load i8, ptr %ptr, !range !{i8 64, i8 128}
   %and = and i8 %val, 64
   %is.eq = icmp eq i8 %and, 64
   ret i1 %is.eq
 ; CHECK: ret i1 true
 }
 
-define i1 @test2(i8* %ptr) {
+define i1 @test2(ptr %ptr) {
 ; CHECK-LABEL: @test2(
  entry:
 ; CHECK: %val = load i8
 ; CHECK: %and = and i8 %val
 ; CHECK: %is.eq = icmp ne i8 %and, 0
 ; CHECK: ret i1 %is.eq
-  %val = load i8, i8* %ptr, !range !{i8 64, i8 129}
+  %val = load i8, ptr %ptr, !range !{i8 64, i8 129}
   %and = and i8 %val, 64
   %is.eq = icmp eq i8 %and, 64
   ret i1 %is.eq

diff --git a/llvm/test/Analysis/ValueTracking/known-non-equal.ll b/llvm/test/Analysis/ValueTracking/known-non-equal.ll
index 45ab2cc87cbfd..79b9b3fd8511b 100644
--- a/llvm/test/Analysis/ValueTracking/known-non-equal.ll
+++ b/llvm/test/Analysis/ValueTracking/known-non-equal.ll
@@ -1,11 +1,11 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt -passes=instsimplify < %s -S | FileCheck %s
 
-define i1 @test(i8* %pq, i8 %B) {
+define i1 @test(ptr %pq, i8 %B) {
 ; CHECK-LABEL: @test(
 ; CHECK-NEXT:    ret i1 false
 ;
-  %q = load i8, i8* %pq, !range !0 ; %q is known nonzero; no known bits
+  %q = load i8, ptr %pq, !range !0 ; %q is known nonzero; no known bits
   %A = add nsw i8 %B, %q
   %cmp = icmp eq i8 %A, %B
   ret i1 %cmp
@@ -55,29 +55,29 @@ define i1 @zext(i8 %B) {
 define i1 @inttoptr(i32 %B) {
 ; CHECK-LABEL: @inttoptr(
 ; CHECK-NEXT:    [[A:%.*]] = add nsw i32 [[B:%.*]], 1
-; CHECK-NEXT:    [[A_CAST:%.*]] = inttoptr i32 [[A]] to i8*
-; CHECK-NEXT:    [[B_CAST:%.*]] = inttoptr i32 [[B]] to i8*
-; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i8* [[A_CAST]], [[B_CAST]]
+; CHECK-NEXT:    [[A_CAST:%.*]] = inttoptr i32 [[A]] to ptr
+; CHECK-NEXT:    [[B_CAST:%.*]] = inttoptr i32 [[B]] to ptr
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq ptr [[A_CAST]], [[B_CAST]]
 ; CHECK-NEXT:    ret i1 [[CMP]]
 ;
   %A = add nsw i32 %B, 1
-  %A.cast = inttoptr i32 %A to i8*
-  %B.cast = inttoptr i32 %B to i8*
-  %cmp = icmp eq i8* %A.cast, %B.cast
+  %A.cast = inttoptr i32 %A to ptr
+  %B.cast = inttoptr i32 %B to ptr
+  %cmp = icmp eq ptr %A.cast, %B.cast
   ret i1 %cmp
 }
 
-define i1 @ptrtoint(i32* %B) {
+define i1 @ptrtoint(ptr %B) {
 ; CHECK-LABEL: @ptrtoint(
-; CHECK-NEXT:    [[A:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i32 1
-; CHECK-NEXT:    [[A_CAST:%.*]] = ptrtoint i32* [[A]] to i32
-; CHECK-NEXT:    [[B_CAST:%.*]] = ptrtoint i32* [[B]] to i32
+; CHECK-NEXT:    [[A:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i32 1
+; CHECK-NEXT:    [[A_CAST:%.*]] = ptrtoint ptr [[A]] to i32
+; CHECK-NEXT:    [[B_CAST:%.*]] = ptrtoint ptr [[B]] to i32
 ; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[A_CAST]], [[B_CAST]]
 ; CHECK-NEXT:    ret i1 [[CMP]]
 ;
-  %A = getelementptr inbounds i32, i32* %B, i32 1
-  %A.cast = ptrtoint i32* %A to i32
-  %B.cast = ptrtoint i32* %B to i32
+  %A = getelementptr inbounds i32, ptr %B, i32 1
+  %A.cast = ptrtoint ptr %A to i32
+  %B.cast = ptrtoint ptr %B to i32
   %cmp = icmp eq i32 %A.cast, %B.cast
   ret i1 %cmp
 }
@@ -206,11 +206,11 @@ define i1 @mul5(i8 %B, i8 %C) {
 define i1 @mul_constantexpr(i16 %a) {
 ; CHECK-LABEL: @mul_constantexpr(
 ; CHECK-NEXT:    [[MUL:%.*]] = mul nsw i16 [[A:%.*]], 3
-; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i16 mul nsw (i16 ptrtoint (i16* @g to i16), i16 -1), [[MUL]]
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i16 mul nsw (i16 ptrtoint (ptr @g to i16), i16 -1), [[MUL]]
 ; CHECK-NEXT:    ret i1 [[CMP]]
 ;
   %mul = mul nsw i16 %a, 3
-  %cmp = icmp eq i16 mul nsw (i16 ptrtoint (i16* @g to i16), i16 -1), %mul
+  %cmp = icmp eq i16 mul nsw (i16 ptrtoint (ptr @g to i16), i16 -1), %mul
   ret i1 %cmp
 }
 
@@ -291,7 +291,7 @@ define i1 @mul_other_may_be_zero_or_one(i16 %x, i16 %y) {
   ret i1 %cmp
 }
 
-define i1 @known_non_equal_phis(i8 %p, i8* %pq, i8 %n, i8 %r) {
+define i1 @known_non_equal_phis(i8 %p, ptr %pq, i8 %n, i8 %r) {
 ; CHECK-LABEL: @known_non_equal_phis(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    br label [[LOOP:%.*]]
@@ -316,7 +316,7 @@ exit:
   ret i1 %cmp
 }
 
-define i1 @known_non_equal_phis_fail(i8 %p, i8* %pq, i8 %n, i8 %r) {
+define i1 @known_non_equal_phis_fail(i8 %p, ptr %pq, i8 %n, i8 %r) {
 ; CHECK-LABEL: @known_non_equal_phis_fail(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    br label [[LOOP:%.*]]

diff --git a/llvm/test/Analysis/ValueTracking/known-nonnull-at.ll b/llvm/test/Analysis/ValueTracking/known-nonnull-at.ll
index 9a48fe2d0ae06..79d2653a3a146 100644
--- a/llvm/test/Analysis/ValueTracking/known-nonnull-at.ll
+++ b/llvm/test/Analysis/ValueTracking/known-nonnull-at.ll
@@ -1,90 +1,90 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt -S -passes=instsimplify < %s | FileCheck %s
 
-declare void @bar(i8* %a, i8* nonnull noundef %b)
-declare void @bar_without_noundef(i8* %a, i8* nonnull %b)
+declare void @bar(ptr %a, ptr nonnull noundef %b)
+declare void @bar_without_noundef(ptr %a, ptr nonnull %b)
 
 ; 'y' must be nonnull.
 
-define i1 @caller1(i8* %x, i8* %y) {
+define i1 @caller1(ptr %x, ptr %y) {
 ; CHECK-LABEL: @caller1(
-; CHECK-NEXT:    call void @bar(i8* [[X:%.*]], i8* [[Y:%.*]])
+; CHECK-NEXT:    call void @bar(ptr [[X:%.*]], ptr [[Y:%.*]])
 ; CHECK-NEXT:    ret i1 false
 ;
-  call void @bar(i8* %x, i8* %y)
-  %null_check = icmp eq i8* %y, null
+  call void @bar(ptr %x, ptr %y)
+  %null_check = icmp eq ptr %y, null
   ret i1 %null_check
 }
 
 ; Don't know anything about 'y'.
 
-define i1 @caller1_maybepoison(i8* %x, i8* %y) {
+define i1 @caller1_maybepoison(ptr %x, ptr %y) {
 ; CHECK-LABEL: @caller1_maybepoison(
-; CHECK-NEXT:    call void @bar_without_noundef(i8* [[X:%.*]], i8* [[Y:%.*]])
-; CHECK-NEXT:    [[NULL_CHECK:%.*]] = icmp eq i8* [[Y]], null
+; CHECK-NEXT:    call void @bar_without_noundef(ptr [[X:%.*]], ptr [[Y:%.*]])
+; CHECK-NEXT:    [[NULL_CHECK:%.*]] = icmp eq ptr [[Y]], null
 ; CHECK-NEXT:    ret i1 [[NULL_CHECK]]
 ;
-  call void @bar_without_noundef(i8* %x, i8* %y)
-  %null_check = icmp eq i8* %y, null
+  call void @bar_without_noundef(ptr %x, ptr %y)
+  %null_check = icmp eq ptr %y, null
   ret i1 %null_check
 }
 
 ; Don't know anything about 'y'.
 
-define i1 @caller2(i8* %x, i8* %y) {
+define i1 @caller2(ptr %x, ptr %y) {
 ; CHECK-LABEL: @caller2(
-; CHECK-NEXT:    call void @bar(i8* [[Y:%.*]], i8* [[X:%.*]])
-; CHECK-NEXT:    [[NULL_CHECK:%.*]] = icmp eq i8* [[Y]], null
+; CHECK-NEXT:    call void @bar(ptr [[Y:%.*]], ptr [[X:%.*]])
+; CHECK-NEXT:    [[NULL_CHECK:%.*]] = icmp eq ptr [[Y]], null
 ; CHECK-NEXT:    ret i1 [[NULL_CHECK]]
 ;
-  call void @bar(i8* %y, i8* %x)
-  %null_check = icmp eq i8* %y, null
+  call void @bar(ptr %y, ptr %x)
+  %null_check = icmp eq ptr %y, null
   ret i1 %null_check
 }
 
 ; 'y' must be nonnull.
 
-define i1 @caller3(i8* %x, i8* %y) {
+define i1 @caller3(ptr %x, ptr %y) {
 ; CHECK-LABEL: @caller3(
-; CHECK-NEXT:    call void @bar(i8* [[X:%.*]], i8* [[Y:%.*]])
+; CHECK-NEXT:    call void @bar(ptr [[X:%.*]], ptr [[Y:%.*]])
 ; CHECK-NEXT:    ret i1 true
 ;
-  call void @bar(i8* %x, i8* %y)
-  %null_check = icmp ne i8* %y, null
+  call void @bar(ptr %x, ptr %y)
+  %null_check = icmp ne ptr %y, null
   ret i1 %null_check
 }
 
 ; FIXME: The call is guaranteed to execute, so 'y' must be nonnull throughout.
 
-define i1 @caller4(i8* %x, i8* %y) {
+define i1 @caller4(ptr %x, ptr %y) {
 ; CHECK-LABEL: @caller4(
-; CHECK-NEXT:    [[NULL_CHECK:%.*]] = icmp ne i8* [[Y:%.*]], null
-; CHECK-NEXT:    call void @bar(i8* [[X:%.*]], i8* [[Y]])
+; CHECK-NEXT:    [[NULL_CHECK:%.*]] = icmp ne ptr [[Y:%.*]], null
+; CHECK-NEXT:    call void @bar(ptr [[X:%.*]], ptr [[Y]])
 ; CHECK-NEXT:    ret i1 [[NULL_CHECK]]
 ;
-  %null_check = icmp ne i8* %y, null
-  call void @bar(i8* %x, i8* %y)
+  %null_check = icmp ne ptr %y, null
+  call void @bar(ptr %x, ptr %y)
   ret i1 %null_check
 }
 
 ; The call to bar() does not dominate the null check, so no change.
 
-define i1 @caller5(i8* %x, i8* %y) {
+define i1 @caller5(ptr %x, ptr %y) {
 ; CHECK-LABEL: @caller5(
-; CHECK-NEXT:    [[NULL_CHECK:%.*]] = icmp eq i8* [[Y:%.*]], null
+; CHECK-NEXT:    [[NULL_CHECK:%.*]] = icmp eq ptr [[Y:%.*]], null
 ; CHECK-NEXT:    br i1 [[NULL_CHECK]], label [[T:%.*]], label [[F:%.*]]
 ; CHECK:       t:
 ; CHECK-NEXT:    ret i1 [[NULL_CHECK]]
 ; CHECK:       f:
-; CHECK-NEXT:    call void @bar(i8* [[X:%.*]], i8* [[Y]])
+; CHECK-NEXT:    call void @bar(ptr [[X:%.*]], ptr [[Y]])
 ; CHECK-NEXT:    ret i1 [[NULL_CHECK]]
 ;
-  %null_check = icmp eq i8* %y, null
+  %null_check = icmp eq ptr %y, null
   br i1 %null_check, label %t, label %f
 t:
   ret i1 %null_check
 f:
-  call void @bar(i8* %x, i8* %y)
+  call void @bar(ptr %x, ptr %y)
   ret i1 %null_check
 }
 
@@ -92,131 +92,131 @@ f:
 
 declare i32 @esfp(...)
 
-define i1 @caller6(i8* %x, i8* %y) personality i8* bitcast (i32 (...)* @esfp to i8*){
+define i1 @caller6(ptr %x, ptr %y) personality ptr @esfp{
 ; CHECK-LABEL: @caller6(
-; CHECK-NEXT:    invoke void @bar(i8* [[X:%.*]], i8* nonnull [[Y:%.*]])
+; CHECK-NEXT:    invoke void @bar(ptr [[X:%.*]], ptr nonnull [[Y:%.*]])
 ; CHECK-NEXT:    to label [[CONT:%.*]] unwind label [[EXC:%.*]]
 ; CHECK:       cont:
 ; CHECK-NEXT:    ret i1 false
 ; CHECK:       exc:
-; CHECK-NEXT:    [[LP:%.*]] = landingpad { i8*, i32 }
-; CHECK-NEXT:    filter [0 x i8*] zeroinitializer
+; CHECK-NEXT:    [[LP:%.*]] = landingpad { ptr, i32 }
+; CHECK-NEXT:    filter [0 x ptr] zeroinitializer
 ; CHECK-NEXT:    unreachable
 ;
-  invoke void @bar(i8* %x, i8* nonnull %y)
+  invoke void @bar(ptr %x, ptr nonnull %y)
   to label %cont unwind label %exc
 
 cont:
-  %null_check = icmp eq i8* %y, null
+  %null_check = icmp eq ptr %y, null
   ret i1 %null_check
 
 exc:
-  %lp = landingpad { i8*, i32 }
-  filter [0 x i8*] zeroinitializer
+  %lp = landingpad { ptr, i32 }
+  filter [0 x ptr] zeroinitializer
   unreachable
 }
 
-declare i8* @returningPtr(i8* returned %p)
+declare ptr @returningPtr(ptr returned %p)
 
-define i1 @nonnullReturnTest(i8* nonnull %x) {
+define i1 @nonnullReturnTest(ptr nonnull %x) {
 ; CHECK-LABEL: @nonnullReturnTest(
-; CHECK-NEXT:    [[X2:%.*]] = call i8* @returningPtr(i8* [[X:%.*]])
+; CHECK-NEXT:    [[X2:%.*]] = call ptr @returningPtr(ptr [[X:%.*]])
 ; CHECK-NEXT:    ret i1 false
 ;
-  %x2 = call i8* @returningPtr(i8* %x)
-  %null_check = icmp eq i8* %x2, null
+  %x2 = call ptr @returningPtr(ptr %x)
+  %null_check = icmp eq ptr %x2, null
   ret i1 %null_check
 }
 
-define i1 @unknownReturnTest(i8* %x) {
+define i1 @unknownReturnTest(ptr %x) {
 ; CHECK-LABEL: @unknownReturnTest(
-; CHECK-NEXT:    [[X2:%.*]] = call i8* @returningPtr(i8* [[X:%.*]])
-; CHECK-NEXT:    [[NULL_CHECK:%.*]] = icmp eq i8* [[X2]], null
+; CHECK-NEXT:    [[X2:%.*]] = call ptr @returningPtr(ptr [[X:%.*]])
+; CHECK-NEXT:    [[NULL_CHECK:%.*]] = icmp eq ptr [[X2]], null
 ; CHECK-NEXT:    ret i1 [[NULL_CHECK]]
 ;
-  %x2 = call i8* @returningPtr(i8* %x)
-  %null_check = icmp eq i8* %x2, null
+  %x2 = call ptr @returningPtr(ptr %x)
+  %null_check = icmp eq ptr %x2, null
   ret i1 %null_check
 }
 
 ; Make sure that if load/store happened, the pointer is nonnull.
 
-define i32 @test_null_after_store(i32* %0) {
+define i32 @test_null_after_store(ptr %0) {
 ; CHECK-LABEL: @test_null_after_store(
-; CHECK-NEXT:    store i32 123, i32* [[TMP0:%.*]], align 4
+; CHECK-NEXT:    store i32 123, ptr [[TMP0:%.*]], align 4
 ; CHECK-NEXT:    ret i32 2
 ;
-  store i32 123, i32* %0, align 4
-  %2 = icmp eq i32* %0, null
+  store i32 123, ptr %0, align 4
+  %2 = icmp eq ptr %0, null
   %3 = select i1 %2, i32 1, i32 2
   ret i32 %3
 }
 
-define i32 @test_null_after_load(i32* %0) {
+define i32 @test_null_after_load(ptr %0) {
 ; CHECK-LABEL: @test_null_after_load(
 ; CHECK-NEXT:    ret i32 1
 ;
-  %2 = load i32, i32* %0, align 4
-  %3 = icmp eq i32* %0, null
+  %2 = load i32, ptr %0, align 4
+  %3 = icmp eq ptr %0, null
   %4 = select i1 %3, i32 %2, i32 1
   ret i32 %4
 }
 
 ; Make sure that different address space does not affect null pointer check.
 
-define i32 @test_null_after_store_addrspace(i32 addrspace(1)* %0) {
+define i32 @test_null_after_store_addrspace(ptr addrspace(1) %0) {
 ; CHECK-LABEL: @test_null_after_store_addrspace(
-; CHECK-NEXT:    store i32 123, i32 addrspace(1)* [[TMP0:%.*]], align 4
-; CHECK-NEXT:    [[TMP2:%.*]] = icmp eq i32 addrspace(1)* [[TMP0]], null
+; CHECK-NEXT:    store i32 123, ptr addrspace(1) [[TMP0:%.*]], align 4
+; CHECK-NEXT:    [[TMP2:%.*]] = icmp eq ptr addrspace(1) [[TMP0]], null
 ; CHECK-NEXT:    [[TMP3:%.*]] = select i1 [[TMP2]], i32 1, i32 2
 ; CHECK-NEXT:    ret i32 [[TMP3]]
 ;
-  store i32 123, i32 addrspace(1)* %0, align 4
-  %2 = icmp eq i32 addrspace(1)* %0, null
+  store i32 123, ptr addrspace(1) %0, align 4
+  %2 = icmp eq ptr addrspace(1) %0, null
   %3 = select i1 %2, i32 1, i32 2
   ret i32 %3
 }
 
-define i32 @test_null_after_load_addrspace(i32 addrspace(1)* %0) {
+define i32 @test_null_after_load_addrspace(ptr addrspace(1) %0) {
 ; CHECK-LABEL: @test_null_after_load_addrspace(
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, i32 addrspace(1)* [[TMP0:%.*]], align 4
-; CHECK-NEXT:    [[TMP3:%.*]] = icmp eq i32 addrspace(1)* [[TMP0]], null
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr addrspace(1) [[TMP0:%.*]], align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = icmp eq ptr addrspace(1) [[TMP0]], null
 ; CHECK-NEXT:    [[TMP4:%.*]] = select i1 [[TMP3]], i32 [[TMP2]], i32 1
 ; CHECK-NEXT:    ret i32 [[TMP4]]
 ;
-  %2 = load i32, i32 addrspace(1)* %0, align 4
-  %3 = icmp eq i32 addrspace(1)* %0, null
+  %2 = load i32, ptr addrspace(1) %0, align 4
+  %3 = icmp eq ptr addrspace(1) %0, null
   %4 = select i1 %3, i32 %2, i32 1
   ret i32 %4
 }
 
 ; Make sure if store happened after the check, nullptr check is not removed.
 
-declare i8* @func(i64)
+declare ptr @func(i64)
 
-define i8* @test_load_store_after_check(i8* %0) {
+define ptr @test_load_store_after_check(ptr %0) {
 ; CHECK-LABEL: @test_load_store_after_check(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP1:%.*]] = call i8* @func(i64 0)
-; CHECK-NEXT:    [[NULL_CHECK:%.*]] = icmp eq i8* [[TMP1]], null
+; CHECK-NEXT:    [[TMP1:%.*]] = call ptr @func(i64 0)
+; CHECK-NEXT:    [[NULL_CHECK:%.*]] = icmp eq ptr [[TMP1]], null
 ; CHECK-NEXT:    br i1 [[NULL_CHECK]], label [[RETURN:%.*]], label [[IF_END:%.*]]
 ; CHECK:       if.end:
-; CHECK-NEXT:    store i8 7, i8* [[TMP1]], align 1
+; CHECK-NEXT:    store i8 7, ptr [[TMP1]], align 1
 ; CHECK-NEXT:    br label [[RETURN]]
 ; CHECK:       return:
-; CHECK-NEXT:    [[RETVAL_0:%.*]] = phi i8* [ [[TMP1]], [[IF_END]] ], [ null, [[ENTRY:%.*]] ]
-; CHECK-NEXT:    ret i8* [[RETVAL_0]]
+; CHECK-NEXT:    [[RETVAL_0:%.*]] = phi ptr [ [[TMP1]], [[IF_END]] ], [ null, [[ENTRY:%.*]] ]
+; CHECK-NEXT:    ret ptr [[RETVAL_0]]
 ;
 entry:
-  %1 = call i8* @func(i64 0)
-  %null_check = icmp eq i8* %1, null
+  %1 = call ptr @func(i64 0)
+  %null_check = icmp eq ptr %1, null
   br i1 %null_check, label %return, label %if.end
 
 if.end:
-  store i8 7, i8* %1
+  store i8 7, ptr %1
   br label %return
 
 return:
-  %retval.0 = phi i8* [ %1, %if.end ], [ null, %entry ]
-  ret i8* %retval.0
+  %retval.0 = phi ptr [ %1, %if.end ], [ null, %entry ]
+  ret ptr %retval.0
 }

diff --git a/llvm/test/Analysis/ValueTracking/knownzero-addrspacecast.ll b/llvm/test/Analysis/ValueTracking/knownzero-addrspacecast.ll
index b51fc8ec4ccd0..874d68e799a7f 100644
--- a/llvm/test/Analysis/ValueTracking/knownzero-addrspacecast.ll
+++ b/llvm/test/Analysis/ValueTracking/knownzero-addrspacecast.ll
@@ -7,9 +7,9 @@ target datalayout = "p:32:32-p3:32:32-p4:64:64"
 
 ; CHECK-LABEL: @test_shift
 ; CHECK-NOT: ret i64 0
-define i64 @test_shift(i8* %p) {
-  %g = addrspacecast i8* %p to i8 addrspace(4)*
-  %i = ptrtoint i8 addrspace(4)* %g to i64
+define i64 @test_shift(ptr %p) {
+  %g = addrspacecast ptr %p to ptr addrspace(4)
+  %i = ptrtoint ptr addrspace(4) %g to i64
   %shift = lshr i64 %i, 32
   ret i64 %shift
 }
@@ -18,7 +18,7 @@ define i64 @test_shift(i8* %p) {
 ; A null pointer casted to another addr space may no longer have null value.
 ; CHECK-NOT: ret i32 0
 define i32 @test_null() {
-  %g = addrspacecast i8* null to i8 addrspace(3)*
-  %i = ptrtoint i8 addrspace(3)* %g to i32
+  %g = addrspacecast ptr null to ptr addrspace(3)
+  %i = ptrtoint ptr addrspace(3) %g to i32
   ret i32 %i
 }

diff --git a/llvm/test/Analysis/ValueTracking/knownzero-shift.ll b/llvm/test/Analysis/ValueTracking/knownzero-shift.ll
index 283845b7e2234..d4ed849c231f8 100644
--- a/llvm/test/Analysis/ValueTracking/knownzero-shift.ll
+++ b/llvm/test/Analysis/ValueTracking/knownzero-shift.ll
@@ -1,11 +1,11 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt -passes=instsimplify -S < %s | FileCheck %s
 
-define i1 @test(i8 %p, i8* %pq) {
+define i1 @test(i8 %p, ptr %pq) {
 ; CHECK-LABEL: @test(
 ; CHECK-NEXT:    ret i1 false
 ;
-  %q = load i8, i8* %pq, !range !0 ; %q is known nonzero; no known bits
+  %q = load i8, ptr %pq, !range !0 ; %q is known nonzero; no known bits
   %1 = or i8 %p, 2                 ; %1[1] = 1
   %2 = and i8 %1, 254              ; %2[0] = 0, %2[1] = 1
   %A = lshr i8 %2, 1               ; We should know that %A is nonzero.

diff --git a/llvm/test/Analysis/ValueTracking/memory-dereferenceable.ll b/llvm/test/Analysis/ValueTracking/memory-dereferenceable.ll
index 1342b8ba4a473..7a976fa7c0f81 100644
--- a/llvm/test/Analysis/ValueTracking/memory-dereferenceable.ll
+++ b/llvm/test/Analysis/ValueTracking/memory-dereferenceable.ll
@@ -11,9 +11,9 @@ target datalayout = "e-i32:32:64"
 
 declare zeroext i1 @return_i1()
 
-declare i32* @foo()
+declare ptr @foo()
 @globalstr = global [6 x i8] c"hello\00"
-@globali32ptr = external global i32*
+@globali32ptr = external global ptr
 
 %struct.A = type { [8 x i8], [5 x i8] }
 @globalstruct = external global %struct.A
@@ -25,19 +25,19 @@ declare i32* @foo()
 ; CHECK-LABEL: 'test_sret'
 ; CHECK: %sret_gep{{.*}}(aligned)
 ; CHECK-NOT: %sret_gep_outside
-define void @test_sret(%struct.A* sret(%struct.A) %result) {
-  %sret_gep = getelementptr inbounds %struct.A, %struct.A* %result, i64 0, i32 1, i64 2
-  load i8, i8* %sret_gep
+define void @test_sret(ptr sret(%struct.A) %result) {
+  %sret_gep = getelementptr inbounds %struct.A, ptr %result, i64 0, i32 1, i64 2
+  load i8, ptr %sret_gep
 
-  %sret_gep_outside = getelementptr %struct.A, %struct.A* %result, i64 0, i32 1, i64 7
-  load i8, i8* %sret_gep_outside
+  %sret_gep_outside = getelementptr %struct.A, ptr %result, i64 0, i32 1, i64 7
+  load i8, ptr %sret_gep_outside
   ret void
 }
 
 ; CHECK-LABEL: 'test'
-define void @test(i32 addrspace(1)* dereferenceable(8) %dparam,
-                  i8 addrspace(1)* dereferenceable(32) align 1 %dparam.align1,
-                  i8 addrspace(1)* dereferenceable(32) align 16 %dparam.align16)
+define void @test(ptr addrspace(1) dereferenceable(8) %dparam,
+                  ptr addrspace(1) dereferenceable(32) align 1 %dparam.align1,
+                  ptr addrspace(1) dereferenceable(32) align 16 %dparam.align16)
     gc "statepoint-example" {
 ; CHECK: The following are dereferenceable:
 entry:
@@ -45,53 +45,53 @@ entry:
 
 ; GLOBAL: %dparam{{.*}}(unaligned)
 ; POINT-NOT: %dparam{{.*}}(unaligned)
-    %load3 = load i32, i32 addrspace(1)* %dparam
+    %load3 = load i32, ptr addrspace(1) %dparam
 
 ; GLOBAL: %relocate{{.*}}(unaligned)
 ; POINT-NOT: %relocate{{.*}}(unaligned)
-    %tok = tail call token (i64, i32, i1 ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_i1f(i64 0, i32 0, i1 ()* elementtype(i1 ()) @return_i1, i32 0, i32 0, i32 0, i32 0) ["gc-live" (i32 addrspace(1)* %dparam)]
-    %relocate = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %tok, i32 0, i32 0)
-    %load4 = load i32, i32 addrspace(1)* %relocate
+    %tok = tail call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr elementtype(i1 ()) @return_i1, i32 0, i32 0, i32 0, i32 0) ["gc-live" (ptr addrspace(1) %dparam)]
+    %relocate = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %tok, i32 0, i32 0)
+    %load4 = load i32, ptr addrspace(1) %relocate
 
 ; CHECK-NOT: %nparam
-    %dpa = call i32 addrspace(1)* @func1(i32 addrspace(1)* %dparam)
-    %nparam = getelementptr i32, i32 addrspace(1)* %dpa, i32 5
-    %load5 = load i32, i32 addrspace(1)* %nparam
+    %dpa = call ptr addrspace(1) @func1(ptr addrspace(1) %dparam)
+    %nparam = getelementptr i32, ptr addrspace(1) %dpa, i32 5
+    %load5 = load i32, ptr addrspace(1) %nparam
 
     ; Load from a non-dereferenceable load
 ; CHECK-NOT: %nd_load
-    %nd_load = load i32*, i32** @globali32ptr
-    %load6 = load i32, i32* %nd_load
+    %nd_load = load ptr, ptr @globali32ptr
+    %load6 = load i32, ptr %nd_load
 
     ; Load from a dereferenceable load
 ; GLOBAL: %d4_load{{.*}}(unaligned)
 ; POINT-NOT: %d4_load{{.*}}(unaligned)
-    %d4_load = load i32*, i32** @globali32ptr, !dereferenceable !0
-    %load7 = load i32, i32* %d4_load
+    %d4_load = load ptr, ptr @globali32ptr, !dereferenceable !0
+    %load7 = load i32, ptr %d4_load
 
     ; Load from an offset not covered by the dereferenceable portion
 ; CHECK-NOT: %d2_load
-    %d2_load = load i32*, i32** @globali32ptr, !dereferenceable !1
-    %load8 = load i32, i32* %d2_load
+    %d2_load = load ptr, ptr @globali32ptr, !dereferenceable !1
+    %load8 = load i32, ptr %d2_load
 
     ; Load from a potentially null pointer with dereferenceable_or_null
 ; CHECK-NOT: %d_or_null_load
-    %d_or_null_load = load i32*, i32** @globali32ptr, !dereferenceable_or_null !0
-    %load9 = load i32, i32* %d_or_null_load
+    %d_or_null_load = load ptr, ptr @globali32ptr, !dereferenceable_or_null !0
+    %load9 = load i32, ptr %d_or_null_load
 
     ; Load from a non-null pointer with dereferenceable_or_null
 ; GLOBAL: %d_or_null_non_null_load{{.*}}(unaligned)
 ; POINT-NOT: %d_or_null_non_null_load{{.*}}(unaligned)
-    %d_or_null_non_null_load = load i32*, i32** @globali32ptr, !nonnull !2, !dereferenceable_or_null !0
-    %load10 = load i32, i32* %d_or_null_non_null_load
+    %d_or_null_non_null_load = load ptr, ptr @globali32ptr, !nonnull !2, !dereferenceable_or_null !0
+    %load10 = load i32, ptr %d_or_null_non_null_load
 
     ; Loads from aligned arguments
 ; GLOBAL: %dparam.align1{{.*}}(unaligned)
 ; POINT-NOT: %dparam.align1{{.*}}(unaligned)
 ; POINT-NOT: %dparam.align16{{.*}}(aligned)
 ; GLOBAL: %dparam.align16{{.*}}(aligned)
-    %load15 = load i8, i8 addrspace(1)* %dparam.align1, align 16
-    %load16 = load i8, i8 addrspace(1)* %dparam.align16, align 16
+    %load15 = load i8, ptr addrspace(1) %dparam.align1, align 16
+    %load16 = load i8, ptr addrspace(1) %dparam.align16, align 16
 
     ; Loads from GEPs
 ; GLOBAL: %gep.align1.offset1{{.*}}(unaligned)
@@ -102,36 +102,36 @@ entry:
 ; POINT-NOT: %gep.align16.offset1{{.*}}(unaligned)
 ; POINT-NOT: %gep.align1.offset16{{.*}}(unaligned)
 ; POINT-NOT: %gep.align16.offset16{{.*}}(aligned)
-    %gep.align1.offset1 = getelementptr inbounds i8, i8 addrspace(1)* %dparam.align1, i32 1
-    %gep.align16.offset1 = getelementptr inbounds i8, i8 addrspace(1)* %dparam.align16, i32 1
-    %gep.align1.offset16 = getelementptr inbounds i8, i8 addrspace(1)* %dparam.align1, i32 16
-    %gep.align16.offset16 = getelementptr inbounds i8, i8 addrspace(1)* %dparam.align16, i32 16
-    %load19 = load i8, i8 addrspace(1)* %gep.align1.offset1, align 16
-    %load20 = load i8, i8 addrspace(1)* %gep.align16.offset1, align 16
-    %load21 = load i8, i8 addrspace(1)* %gep.align1.offset16, align 16
-    %load22 = load i8, i8 addrspace(1)* %gep.align16.offset16, align 16
+    %gep.align1.offset1 = getelementptr inbounds i8, ptr addrspace(1) %dparam.align1, i32 1
+    %gep.align16.offset1 = getelementptr inbounds i8, ptr addrspace(1) %dparam.align16, i32 1
+    %gep.align1.offset16 = getelementptr inbounds i8, ptr addrspace(1) %dparam.align1, i32 16
+    %gep.align16.offset16 = getelementptr inbounds i8, ptr addrspace(1) %dparam.align16, i32 16
+    %load19 = load i8, ptr addrspace(1) %gep.align1.offset1, align 16
+    %load20 = load i8, ptr addrspace(1) %gep.align16.offset1, align 16
+    %load21 = load i8, ptr addrspace(1) %gep.align1.offset16, align 16
+    %load22 = load i8, ptr addrspace(1) %gep.align16.offset16, align 16
 
 ; CHECK-NOT: %no_deref_return
 ; GLOBAL: %deref_return{{.*}}(unaligned)
 ; GLOBAL: %deref_and_aligned_return{{.*}}(aligned)
 ; POINT-NOT: %deref_return{{.*}}(unaligned)
 ; POINT-NOT: %deref_and_aligned_return{{.*}}(aligned)
-    %no_deref_return = call i32* @foo()
-    %deref_return = call dereferenceable(32) i32* @foo()
-    %deref_and_aligned_return = call dereferenceable(32) align 16 i32* @foo()
-    %load23 = load i32, i32* %no_deref_return
-    %load24 = load i32, i32* %deref_return, align 16
-    %load25 = load i32, i32* %deref_and_aligned_return, align 16
+    %no_deref_return = call ptr @foo()
+    %deref_return = call dereferenceable(32) ptr @foo()
+    %deref_and_aligned_return = call dereferenceable(32) align 16 ptr @foo()
+    %load23 = load i32, ptr %no_deref_return
+    %load24 = load i32, ptr %deref_return, align 16
+    %load25 = load i32, ptr %deref_and_aligned_return, align 16
 
     ; Load from a dereferenceable and aligned load
 ; GLOBAL: %d4_unaligned_load{{.*}}(unaligned)
 ; GLOBAL: %d4_aligned_load{{.*}}(aligned)
 ; POINT-NOT: %d4_unaligned_load{{.*}}(unaligned)
 ; POINT-NOT: %d4_aligned_load{{.*}}(aligned)
-    %d4_unaligned_load = load i32*, i32** @globali32ptr, !dereferenceable !0
-    %d4_aligned_load = load i32*, i32** @globali32ptr, !dereferenceable !0, !align !{i64 16}
-    %load26 = load i32, i32* %d4_unaligned_load, align 16
-    %load27 = load i32, i32* %d4_aligned_load, align 16
+    %d4_unaligned_load = load ptr, ptr @globali32ptr, !dereferenceable !0
+    %d4_aligned_load = load ptr, ptr @globali32ptr, !dereferenceable !0, !align !{i64 16}
+    %load26 = load i32, ptr %d4_unaligned_load, align 16
+    %load27 = load i32, ptr %d4_aligned_load, align 16
     ret void
 }
 
@@ -143,8 +143,8 @@ define void @alloca_aligned() {
    %alloca.align1 = alloca i1, align 1
    %alloca.align16 = alloca i1, align 16
    call void @mayfree()
-   %load17 = load i1, i1* %alloca.align1, align 16
-   %load18 = load i1, i1* %alloca.align16, align 16
+   %load17 = load i1, ptr %alloca.align1, align 16
+   %load18 = load i1, ptr %alloca.align16, align 16
    ret void
 }
 
@@ -153,7 +153,7 @@ define void @alloca_aligned() {
 define void @alloca_basic() {
   %alloca = alloca i1
   call void @mayfree()
-  %load2 = load i1, i1* %alloca
+  %load2 = load i1, ptr %alloca
   ret void
 }
 
@@ -163,7 +163,7 @@ define void @alloca_basic() {
 define void @alloca_empty() {
   %empty_alloca = alloca i8, i64 0
   call void @mayfree()
-  %empty_load = load i8, i8* %empty_alloca
+  %empty_load = load i8, ptr %empty_alloca
   ret void
 }
 
@@ -174,20 +174,19 @@ define void @alloca_empty() {
 define void @alloca_perfalign() {
    %alloca.noalign = alloca i32
    call void @mayfree()
-   %load28 = load i32, i32* %alloca.noalign, align 8
+   %load28 = load i32, ptr %alloca.noalign, align 8
    ret void
 }
 
 ; CHECK-LABEL: 'global'
 ; CHECK: @globalptr.align1{{.*}}(unaligned)
 ; CHECK: @globalptr.align16{{.*}}(aligned)
-; CHECK: %globalptr{{.*}}(aligned)
+; CHECK: @globalstr{{.*}}(aligned)
 define void @global() {
-  %load13 = load i8, i8* @globalptr.align1, align 16
-  %load14 = load i8, i8* @globalptr.align16, align 16
+  %load13 = load i8, ptr @globalptr.align1, align 16
+  %load14 = load i8, ptr @globalptr.align16, align 16
 
-  %globalptr = getelementptr inbounds [6 x i8], [6 x i8]* @globalstr, i32 0, i32 0
-  %load1 = load i8, i8* %globalptr
+  %load1 = load i8, ptr @globalstr
   ret void
 }
 
@@ -197,11 +196,11 @@ define void @global() {
 ; CHECK: %within_allocation{{.*}}(aligned)
 ; CHECK-NOT: %outside_allocation
 define void @global_allocationsize() {
-  %within_allocation = getelementptr inbounds %struct.A, %struct.A* @globalstruct, i64 0, i32 0, i64 10
-  %load11 = load i8, i8* %within_allocation
+  %within_allocation = getelementptr inbounds %struct.A, ptr @globalstruct, i64 0, i32 0, i64 10
+  %load11 = load i8, ptr %within_allocation
 
-  %outside_allocation = getelementptr inbounds %struct.A, %struct.A* @globalstruct, i64 0, i32 1, i64 10
-  %load12 = load i8, i8* %outside_allocation
+  %outside_allocation = getelementptr inbounds %struct.A, ptr @globalstruct, i64 0, i32 1, i64 10
+  %load12 = load i8, ptr %outside_allocation
   ret void
 }
 
@@ -210,30 +209,28 @@ define void @global_allocationsize() {
 ; CHECK: %i8_byval{{.*}}(aligned)
 ; CHECK-NOT: %bad_byval_cast
 ; CHECK: %byval_gep{{.*}}(aligned)
-; CHECK: %good_byval_cast{{.*}}(unaligned)
-define void @byval(i8* byval(i8) %i8_byval,
-                   %struct.A* byval(%struct.A) %A_byval) {
+; CHECK: %A_byval{{.*}}(unaligned)
+define void @byval(ptr byval(i8) %i8_byval,
+                   ptr byval(%struct.A) %A_byval) {
   call void @mayfree()
-  load i8, i8* %i8_byval
+  load i8, ptr %i8_byval
 
-  %bad_byval_cast = bitcast i8* %i8_byval to i32*
-  load i32, i32* %bad_byval_cast
+  load i32, ptr %i8_byval
 
-  %byval_gep = getelementptr inbounds %struct.A, %struct.A* %A_byval, i64 0, i32 1, i64 2
-  load i8, i8* %byval_gep
-  %good_byval_cast = bitcast %struct.A* %A_byval to i32*
-  load i32, i32* %good_byval_cast
+  %byval_gep = getelementptr inbounds %struct.A, ptr %A_byval, i64 0, i32 1, i64 2
+  load i8, ptr %byval_gep
+  load i32, ptr %A_byval
 
   ret void
 }
 
 ; CHECK-LABEL: 'f_0'
-; GLOBAL: %ptr = inttoptr i32 %val to i32*, !dereferenceable !0
-; POINT-NOT: %ptr = inttoptr i32 %val to i32*, !dereferenceable !0
+; GLOBAL: %ptr = inttoptr i32 %val to ptr, !dereferenceable !0
+; POINT-NOT: %ptr = inttoptr i32 %val to ptr, !dereferenceable !0
 define i32 @f_0(i32 %val) {
-  %ptr = inttoptr i32 %val to i32*, !dereferenceable !0
+  %ptr = inttoptr i32 %val to ptr, !dereferenceable !0
   call void @mayfree()
-  %load29 = load i32, i32* %ptr, align 8
+  %load29 = load i32, ptr %ptr, align 8
   ret i32 %load29
 }
 
@@ -243,17 +240,17 @@ define i32 @f_0(i32 %val) {
 ; CHECK-LABEL: 'negative'
 ; GLOBAL: %p
 ; POINT-NOT: %p
-define void @negative(i32* dereferenceable(8) %p) {
+define void @negative(ptr dereferenceable(8) %p) {
   call void @mayfree()
-  %v = load i32, i32* %p
+  %v = load i32, ptr %p
   ret void
 }
 
 ; CHECK-LABEL: 'infer_func_attrs1'
 ; CHECK: %p
-define void @infer_func_attrs1(i32* dereferenceable(8) %p) nofree nosync {
+define void @infer_func_attrs1(ptr dereferenceable(8) %p) nofree nosync {
   call void @mayfree()
-  %v = load i32, i32* %p
+  %v = load i32, ptr %p
   ret void
 }
 
@@ -261,9 +258,9 @@ define void @infer_func_attrs1(i32* dereferenceable(8) %p) nofree nosync {
 ; GLOBAL: %p
 ; POINT-NOT: %p
 ; FIXME: Can be inferred from attributes
-define void @infer_func_attrs2(i32* dereferenceable(8) %p) readonly {
+define void @infer_func_attrs2(ptr dereferenceable(8) %p) readonly {
   call void @mayfree()
-  %v = load i32, i32* %p
+  %v = load i32, ptr %p
   ret void
 }
 
@@ -271,9 +268,9 @@ define void @infer_func_attrs2(i32* dereferenceable(8) %p) readonly {
 ; GLOBAL: %p
 ; POINT-NOT: %p
 ; FIXME: Can be inferred from attributes
-define void @infer_noalias1(i32* dereferenceable(8) noalias nofree %p) {
+define void @infer_noalias1(ptr dereferenceable(8) noalias nofree %p) {
   call void @mayfree()
-  %v = load i32, i32* %p
+  %v = load i32, ptr %p
   ret void
 }
 
@@ -281,34 +278,32 @@ define void @infer_noalias1(i32* dereferenceable(8) noalias nofree %p) {
 ; GLOBAL: %p
 ; POINT-NOT: %p
 ; FIXME: Can be inferred from attributes
-define void @infer_noalias2(i32* dereferenceable(8) noalias readonly %p) nosync {
+define void @infer_noalias2(ptr dereferenceable(8) noalias readonly %p) nosync {
   call void @mayfree()
-  %v = load i32, i32* %p
+  %v = load i32, ptr %p
   ret void
 }
 
 
 ; Just check that we don't crash.
 ; CHECK-LABEL: 'opaque_type_crasher'
-define void @opaque_type_crasher(%TypeOpaque* dereferenceable(16) %a) {
+define void @opaque_type_crasher(ptr dereferenceable(16) %a) {
 entry:
-  %bc = bitcast %TypeOpaque* %a to i8*
-  %ptr8 = getelementptr inbounds i8, i8* %bc, i32 8
-  %ptr32 = bitcast i8* %ptr8 to i32*
+  %ptr8 = getelementptr inbounds i8, ptr %a, i32 8
   br i1 undef, label %if.then, label %if.end
 
 if.then:
-  %res = load i32, i32* %ptr32, align 4
+  %res = load i32, ptr %ptr8, align 4
   br label %if.end
 
 if.end:
   ret void
 }
 
-declare token @llvm.experimental.gc.statepoint.p0f_i1f(i64, i32, i1 ()*, i32, i32, ...)
-declare i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token, i32, i32)
+declare token @llvm.experimental.gc.statepoint.p0(i64, i32, ptr, i32, i32, ...)
+declare ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token, i32, i32)
 
-declare i32 addrspace(1)* @func1(i32 addrspace(1)* returned) nounwind argmemonly
+declare ptr addrspace(1) @func1(ptr addrspace(1) returned) nounwind argmemonly
 
 ; Can free any object accessible in memory
 declare void @mayfree()

diff  --git a/llvm/test/Analysis/ValueTracking/select-pattern.ll b/llvm/test/Analysis/ValueTracking/select-pattern.ll
index ace5d8fcf27a3..f553baf4105f7 100644
--- a/llvm/test/Analysis/ValueTracking/select-pattern.ll
+++ b/llvm/test/Analysis/ValueTracking/select-pattern.ll
@@ -5,7 +5,7 @@
 ; as an operand to be analyzed. This would then cause
 ; infinite recursion and eventual crash.
 
-define void @PR36045(i1 %t, i32* %b) {
+define void @PR36045(i1 %t, ptr %b) {
 ; CHECK-LABEL: @PR36045(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[TMP0:%.*]] = xor i1 [[T:%.*]], true
@@ -36,7 +36,7 @@ for.end:
   br i1 %t, label %unreach2, label %then12
 
 then12:
-  store i32 0, i32* %b
+  store i32 0, ptr %b
   br label %unreach2
 
 unreach2:


        


More information about the llvm-commits mailing list