[llvm] 3c514d3 - [EarlyCSE] Update tests to use opaque pointers (NFC)

Nikita Popov via llvm-commits llvm-commits at lists.llvm.org
Fri Jun 10 00:54:45 PDT 2022


Author: Nikita Popov
Date: 2022-06-10T09:53:35+02:00
New Revision: 3c514d31d7913f7841ed6a31eff27397a76207e4

URL: https://github.com/llvm/llvm-project/commit/3c514d31d7913f7841ed6a31eff27397a76207e4
DIFF: https://github.com/llvm/llvm-project/commit/3c514d31d7913f7841ed6a31eff27397a76207e4.diff

LOG: [EarlyCSE] Update tests to use opaque pointers (NFC)

Update the EarlyCSE tests to use opaque pointers.

Worth noting that this leaves behind some bitcast ptr to ptr
instructions in the input IR that are no longer necessary. This is
because the tests use numbered (unnamed) instructions, so it's hard
to drop the bitcasts in an automated fashion (doing so would require
renumbering all later instructions as well). I'm leaving that as a
problem for another day.

The test updates have been performed using
https://gist.github.com/nikic/98357b71fd67756b0f064c9517b62a34.

Differential Revision: https://reviews.llvm.org/D127278

Added: 
    

Modified: 
    llvm/test/Transforms/EarlyCSE/AArch64/intrinsics.ll
    llvm/test/Transforms/EarlyCSE/AArch64/ldstN.ll
    llvm/test/Transforms/EarlyCSE/AMDGPU/intrinsics.ll
    llvm/test/Transforms/EarlyCSE/PowerPC/read-reg.ll
    llvm/test/Transforms/EarlyCSE/X86/preserve_memoryssa.ll
    llvm/test/Transforms/EarlyCSE/atomics.ll
    llvm/test/Transforms/EarlyCSE/basic.ll
    llvm/test/Transforms/EarlyCSE/commute.ll
    llvm/test/Transforms/EarlyCSE/conditional.ll
    llvm/test/Transforms/EarlyCSE/const-speculation.ll
    llvm/test/Transforms/EarlyCSE/debug-info-undef.ll
    llvm/test/Transforms/EarlyCSE/debuginfo-dce.ll
    llvm/test/Transforms/EarlyCSE/edge.ll
    llvm/test/Transforms/EarlyCSE/fence.ll
    llvm/test/Transforms/EarlyCSE/flags.ll
    llvm/test/Transforms/EarlyCSE/floatingpoint.ll
    llvm/test/Transforms/EarlyCSE/gc_relocate.ll
    llvm/test/Transforms/EarlyCSE/getmatchingvalue-crash.ll
    llvm/test/Transforms/EarlyCSE/guards.ll
    llvm/test/Transforms/EarlyCSE/int_sideeffect.ll
    llvm/test/Transforms/EarlyCSE/invariant-loads.ll
    llvm/test/Transforms/EarlyCSE/invariant.start.ll
    llvm/test/Transforms/EarlyCSE/masked-intrinsics-unequal-masks.ll
    llvm/test/Transforms/EarlyCSE/masked-intrinsics.ll
    llvm/test/Transforms/EarlyCSE/memoryssa.ll
    llvm/test/Transforms/EarlyCSE/noalias-scope-decl.ll
    llvm/test/Transforms/EarlyCSE/phi.ll
    llvm/test/Transforms/EarlyCSE/pr33406.ll
    llvm/test/Transforms/EarlyCSE/readnone-mayunwind.ll
    llvm/test/Transforms/EarlyCSE/writeonly.ll

Removed: 
    


################################################################################
diff  --git a/llvm/test/Transforms/EarlyCSE/AArch64/intrinsics.ll b/llvm/test/Transforms/EarlyCSE/AArch64/intrinsics.ll
index a7be718a8b466..ee090988001d8 100644
--- a/llvm/test/Transforms/EarlyCSE/AArch64/intrinsics.ll
+++ b/llvm/test/Transforms/EarlyCSE/AArch64/intrinsics.ll
@@ -3,11 +3,11 @@
 ; RUN: opt < %s -S -mtriple=aarch64-none-linux-gnu -mattr=+neon -passes=early-cse | FileCheck %s
 ; RUN: opt < %s -S -mtriple=aarch64-none-linux-gnu -mattr=+neon -aa-pipeline=basic-aa -passes='early-cse<memssa>' | FileCheck %s
 
-define <4 x i32> @test_cse(i32* %a, [2 x <4 x i32>] %s.coerce, i32 %n) {
+define <4 x i32> @test_cse(ptr %a, [2 x <4 x i32>] %s.coerce, i32 %n) {
 entry:
 ; Check that @llvm.aarch64.neon.ld2 is optimized away by Early CSE.
 ; CHECK-LABEL: @test_cse
-; CHECK-NOT: call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2.v4i32.p0i8
+; CHECK-NOT: call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2.v4i32.p0
   %s.coerce.fca.0.extract = extractvalue [2 x <4 x i32>] %s.coerce, 0
   %s.coerce.fca.1.extract = extractvalue [2 x <4 x i32>] %s.coerce, 1
   br label %for.cond
@@ -19,14 +19,14 @@ for.cond:                                         ; preds = %for.body, %entry
   br i1 %cmp, label %for.body, label %for.end
 
 for.body:                                         ; preds = %for.cond
-  %0 = bitcast i32* %a to i8*
+  %0 = bitcast ptr %a to ptr
   %1 = bitcast <4 x i32> %s.coerce.fca.0.extract to <16 x i8>
   %2 = bitcast <4 x i32> %s.coerce.fca.1.extract to <16 x i8>
   %3 = bitcast <16 x i8> %1 to <4 x i32>
   %4 = bitcast <16 x i8> %2 to <4 x i32>
-  call void @llvm.aarch64.neon.st2.v4i32.p0i8(<4 x i32> %3, <4 x i32> %4, i8* %0)
-  %5 = bitcast i32* %a to i8*
-  %vld2 = call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2.v4i32.p0i8(i8* %5)
+  call void @llvm.aarch64.neon.st2.v4i32.p0(<4 x i32> %3, <4 x i32> %4, ptr %0)
+  %5 = bitcast ptr %a to ptr
+  %vld2 = call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2.v4i32.p0(ptr %5)
   %vld2.fca.0.extract = extractvalue { <4 x i32>, <4 x i32> } %vld2, 0
   %vld2.fca.1.extract = extractvalue { <4 x i32>, <4 x i32> } %vld2, 1
   %call = call <4 x i32> @vaddq_s32(<4 x i32> %vld2.fca.0.extract, <4 x i32> %vld2.fca.0.extract)
@@ -37,12 +37,12 @@ for.end:                                          ; preds = %for.cond
   ret <4 x i32> %res.0
 }
 
-define <4 x i32> @test_cse2(i32* %a, [2 x <4 x i32>] %s.coerce, i32 %n) {
+define <4 x i32> @test_cse2(ptr %a, [2 x <4 x i32>] %s.coerce, i32 %n) {
 entry:
 ; Check that the first @llvm.aarch64.neon.st2 is optimized away by Early CSE.
 ; CHECK-LABEL: @test_cse2
-; CHECK-NOT: call void @llvm.aarch64.neon.st2.v4i32.p0i8(<4 x i32> %3, <4 x i32> %3, i8* %0)
-; CHECK: call void @llvm.aarch64.neon.st2.v4i32.p0i8(<4 x i32> %s.coerce.fca.0.extract, <4 x i32> %s.coerce.fca.1.extract, i8* %0)
+; CHECK-NOT: call void @llvm.aarch64.neon.st2.v4i32.p0(<4 x i32> %3, <4 x i32> %3, ptr %0)
+; CHECK: call void @llvm.aarch64.neon.st2.v4i32.p0(<4 x i32> %s.coerce.fca.0.extract, <4 x i32> %s.coerce.fca.1.extract, ptr %a)
   %s.coerce.fca.0.extract = extractvalue [2 x <4 x i32>] %s.coerce, 0
   %s.coerce.fca.1.extract = extractvalue [2 x <4 x i32>] %s.coerce, 1
   br label %for.cond
@@ -54,15 +54,15 @@ for.cond:                                         ; preds = %for.body, %entry
   br i1 %cmp, label %for.body, label %for.end
 
 for.body:                                         ; preds = %for.cond
-  %0 = bitcast i32* %a to i8*
+  %0 = bitcast ptr %a to ptr
   %1 = bitcast <4 x i32> %s.coerce.fca.0.extract to <16 x i8>
   %2 = bitcast <4 x i32> %s.coerce.fca.1.extract to <16 x i8>
   %3 = bitcast <16 x i8> %1 to <4 x i32>
   %4 = bitcast <16 x i8> %2 to <4 x i32>
-  call void @llvm.aarch64.neon.st2.v4i32.p0i8(<4 x i32> %3, <4 x i32> %3, i8* %0)
-  call void @llvm.aarch64.neon.st2.v4i32.p0i8(<4 x i32> %3, <4 x i32> %4, i8* %0)
-  %5 = bitcast i32* %a to i8*
-  %vld2 = call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2.v4i32.p0i8(i8* %5)
+  call void @llvm.aarch64.neon.st2.v4i32.p0(<4 x i32> %3, <4 x i32> %3, ptr %0)
+  call void @llvm.aarch64.neon.st2.v4i32.p0(<4 x i32> %3, <4 x i32> %4, ptr %0)
+  %5 = bitcast ptr %a to ptr
+  %vld2 = call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2.v4i32.p0(ptr %5)
   %vld2.fca.0.extract = extractvalue { <4 x i32>, <4 x i32> } %vld2, 0
   %vld2.fca.1.extract = extractvalue { <4 x i32>, <4 x i32> } %vld2, 1
   %call = call <4 x i32> @vaddq_s32(<4 x i32> %vld2.fca.0.extract, <4 x i32> %vld2.fca.0.extract)
@@ -73,12 +73,12 @@ for.end:                                          ; preds = %for.cond
   ret <4 x i32> %res.0
 }
 
-define <4 x i32> @test_cse3(i32* %a, [2 x <4 x i32>] %s.coerce, i32 %n) #0 {
+define <4 x i32> @test_cse3(ptr %a, [2 x <4 x i32>] %s.coerce, i32 %n) #0 {
 entry:
 ; Check that the first @llvm.aarch64.neon.ld2 is optimized away by Early CSE.
 ; CHECK-LABEL: @test_cse3
-; CHECK: call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2.v4i32.p0i8
-; CHECK-NOT: call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2.v4i32.p0i8
+; CHECK: call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2.v4i32.p0
+; CHECK-NOT: call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2.v4i32.p0
   %s.coerce.fca.0.extract = extractvalue [2 x <4 x i32>] %s.coerce, 0
   %s.coerce.fca.1.extract = extractvalue [2 x <4 x i32>] %s.coerce, 1
   br label %for.cond
@@ -90,12 +90,12 @@ for.cond:                                         ; preds = %for.body, %entry
   br i1 %cmp, label %for.body, label %for.end
 
 for.body:                                         ; preds = %for.cond
-  %0 = bitcast i32* %a to i8*
-  %vld2 = call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2.v4i32.p0i8(i8* %0)
+  %0 = bitcast ptr %a to ptr
+  %vld2 = call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2.v4i32.p0(ptr %0)
   %vld2.fca.0.extract = extractvalue { <4 x i32>, <4 x i32> } %vld2, 0
   %vld2.fca.1.extract = extractvalue { <4 x i32>, <4 x i32> } %vld2, 1
-  %1 = bitcast i32* %a to i8*
-  %vld22 = call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2.v4i32.p0i8(i8* %1)
+  %1 = bitcast ptr %a to ptr
+  %vld22 = call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2.v4i32.p0(ptr %1)
   %vld22.fca.0.extract = extractvalue { <4 x i32>, <4 x i32> } %vld22, 0
   %vld22.fca.1.extract = extractvalue { <4 x i32>, <4 x i32> } %vld22, 1
   %call = call <4 x i32> @vaddq_s32(<4 x i32> %vld2.fca.0.extract, <4 x i32> %vld22.fca.0.extract)
@@ -107,12 +107,12 @@ for.end:                                          ; preds = %for.cond
 }
 
 
-define <4 x i32> @test_nocse(i32* %a, i32* %b, [2 x <4 x i32>] %s.coerce, i32 %n) {
+define <4 x i32> @test_nocse(ptr %a, ptr %b, [2 x <4 x i32>] %s.coerce, i32 %n) {
 entry:
 ; Check that the store prevents @llvm.aarch64.neon.ld2 from being optimized
 ; away by Early CSE.
 ; CHECK-LABEL: @test_nocse
-; CHECK: call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2.v4i32.p0i8
+; CHECK: call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2.v4i32.p0
   %s.coerce.fca.0.extract = extractvalue [2 x <4 x i32>] %s.coerce, 0
   %s.coerce.fca.1.extract = extractvalue [2 x <4 x i32>] %s.coerce, 1
   br label %for.cond
@@ -124,15 +124,15 @@ for.cond:                                         ; preds = %for.body, %entry
   br i1 %cmp, label %for.body, label %for.end
 
 for.body:                                         ; preds = %for.cond
-  %0 = bitcast i32* %a to i8*
+  %0 = bitcast ptr %a to ptr
   %1 = bitcast <4 x i32> %s.coerce.fca.0.extract to <16 x i8>
   %2 = bitcast <4 x i32> %s.coerce.fca.1.extract to <16 x i8>
   %3 = bitcast <16 x i8> %1 to <4 x i32>
   %4 = bitcast <16 x i8> %2 to <4 x i32>
-  call void @llvm.aarch64.neon.st2.v4i32.p0i8(<4 x i32> %3, <4 x i32> %4, i8* %0)
-  store i32 0, i32* %b, align 4
-  %5 = bitcast i32* %a to i8*
-  %vld2 = call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2.v4i32.p0i8(i8* %5)
+  call void @llvm.aarch64.neon.st2.v4i32.p0(<4 x i32> %3, <4 x i32> %4, ptr %0)
+  store i32 0, ptr %b, align 4
+  %5 = bitcast ptr %a to ptr
+  %vld2 = call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2.v4i32.p0(ptr %5)
   %vld2.fca.0.extract = extractvalue { <4 x i32>, <4 x i32> } %vld2, 0
   %vld2.fca.1.extract = extractvalue { <4 x i32>, <4 x i32> } %vld2, 1
   %call = call <4 x i32> @vaddq_s32(<4 x i32> %vld2.fca.0.extract, <4 x i32> %vld2.fca.0.extract)
@@ -143,12 +143,12 @@ for.end:                                          ; preds = %for.cond
   ret <4 x i32> %res.0
 }
 
-define <4 x i32> @test_nocse2(i32* %a, [2 x <4 x i32>] %s.coerce, i32 %n) {
+define <4 x i32> @test_nocse2(ptr %a, [2 x <4 x i32>] %s.coerce, i32 %n) {
 entry:
 ; Check that @llvm.aarch64.neon.ld3 is not optimized away by Early CSE due
 ; to mismatch between st2 and ld3.
 ; CHECK-LABEL: @test_nocse2
-; CHECK: call { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld3.v4i32.p0i8
+; CHECK: call { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld3.v4i32.p0
   %s.coerce.fca.0.extract = extractvalue [2 x <4 x i32>] %s.coerce, 0
   %s.coerce.fca.1.extract = extractvalue [2 x <4 x i32>] %s.coerce, 1
   br label %for.cond
@@ -160,14 +160,14 @@ for.cond:                                         ; preds = %for.body, %entry
   br i1 %cmp, label %for.body, label %for.end
 
 for.body:                                         ; preds = %for.cond
-  %0 = bitcast i32* %a to i8*
+  %0 = bitcast ptr %a to ptr
   %1 = bitcast <4 x i32> %s.coerce.fca.0.extract to <16 x i8>
   %2 = bitcast <4 x i32> %s.coerce.fca.1.extract to <16 x i8>
   %3 = bitcast <16 x i8> %1 to <4 x i32>
   %4 = bitcast <16 x i8> %2 to <4 x i32>
-  call void @llvm.aarch64.neon.st2.v4i32.p0i8(<4 x i32> %3, <4 x i32> %4, i8* %0)
-  %5 = bitcast i32* %a to i8*
-  %vld3 = call { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld3.v4i32.p0i8(i8* %5)
+  call void @llvm.aarch64.neon.st2.v4i32.p0(<4 x i32> %3, <4 x i32> %4, ptr %0)
+  %5 = bitcast ptr %a to ptr
+  %vld3 = call { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld3.v4i32.p0(ptr %5)
   %vld3.fca.0.extract = extractvalue { <4 x i32>, <4 x i32>, <4 x i32> } %vld3, 0
   %vld3.fca.2.extract = extractvalue { <4 x i32>, <4 x i32>, <4 x i32> } %vld3, 2
   %call = call <4 x i32> @vaddq_s32(<4 x i32> %vld3.fca.0.extract, <4 x i32> %vld3.fca.2.extract)
@@ -178,13 +178,13 @@ for.end:                                          ; preds = %for.cond
   ret <4 x i32> %res.0
 }
 
-define <4 x i32> @test_nocse3(i32* %a, [2 x <4 x i32>] %s.coerce, i32 %n) {
+define <4 x i32> @test_nocse3(ptr %a, [2 x <4 x i32>] %s.coerce, i32 %n) {
 entry:
 ; Check that @llvm.aarch64.neon.st3 is not optimized away by Early CSE due to
 ; mismatch between st2 and st3.
 ; CHECK-LABEL: @test_nocse3
-; CHECK: call void @llvm.aarch64.neon.st3.v4i32.p0i8
-; CHECK: call void @llvm.aarch64.neon.st2.v4i32.p0i8
+; CHECK: call void @llvm.aarch64.neon.st3.v4i32.p0
+; CHECK: call void @llvm.aarch64.neon.st2.v4i32.p0
   %s.coerce.fca.0.extract = extractvalue [2 x <4 x i32>] %s.coerce, 0
   %s.coerce.fca.1.extract = extractvalue [2 x <4 x i32>] %s.coerce, 1
   br label %for.cond
@@ -196,15 +196,15 @@ for.cond:                                         ; preds = %for.body, %entry
   br i1 %cmp, label %for.body, label %for.end
 
 for.body:                                         ; preds = %for.cond
-  %0 = bitcast i32* %a to i8*
+  %0 = bitcast ptr %a to ptr
   %1 = bitcast <4 x i32> %s.coerce.fca.0.extract to <16 x i8>
   %2 = bitcast <4 x i32> %s.coerce.fca.1.extract to <16 x i8>
   %3 = bitcast <16 x i8> %1 to <4 x i32>
   %4 = bitcast <16 x i8> %2 to <4 x i32>
-  call void @llvm.aarch64.neon.st3.v4i32.p0i8(<4 x i32> %4, <4 x i32> %3, <4 x i32> %3, i8* %0)
-  call void @llvm.aarch64.neon.st2.v4i32.p0i8(<4 x i32> %3, <4 x i32> %3, i8* %0)
-  %5 = bitcast i32* %a to i8*
-  %vld3 = call { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld3.v4i32.p0i8(i8* %5)
+  call void @llvm.aarch64.neon.st3.v4i32.p0(<4 x i32> %4, <4 x i32> %3, <4 x i32> %3, ptr %0)
+  call void @llvm.aarch64.neon.st2.v4i32.p0(<4 x i32> %3, <4 x i32> %3, ptr %0)
+  %5 = bitcast ptr %a to ptr
+  %vld3 = call { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld3.v4i32.p0(ptr %5)
   %vld3.fca.0.extract = extractvalue { <4 x i32>, <4 x i32>, <4 x i32> } %vld3, 0
   %vld3.fca.1.extract = extractvalue { <4 x i32>, <4 x i32>, <4 x i32> } %vld3, 1
   %call = call <4 x i32> @vaddq_s32(<4 x i32> %vld3.fca.0.extract, <4 x i32> %vld3.fca.0.extract)
@@ -216,16 +216,16 @@ for.end:                                          ; preds = %for.cond
 }
 
 ; Function Attrs: nounwind
-declare void @llvm.aarch64.neon.st2.v4i32.p0i8(<4 x i32>, <4 x i32>, i8* nocapture)
+declare void @llvm.aarch64.neon.st2.v4i32.p0(<4 x i32>, <4 x i32>, ptr nocapture)
 
 ; Function Attrs: nounwind
-declare void @llvm.aarch64.neon.st3.v4i32.p0i8(<4 x i32>, <4 x i32>, <4 x i32>, i8* nocapture)
+declare void @llvm.aarch64.neon.st3.v4i32.p0(<4 x i32>, <4 x i32>, <4 x i32>, ptr nocapture)
 
 ; Function Attrs: nounwind readonly
-declare { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2.v4i32.p0i8(i8*)
+declare { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2.v4i32.p0(ptr)
 
 ; Function Attrs: nounwind readonly
-declare { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld3.v4i32.p0i8(i8*)
+declare { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld3.v4i32.p0(ptr)
 
 define internal fastcc <4 x i32> @vaddq_s32(<4 x i32> %__p0, <4 x i32> %__p1) {
 entry:

diff  --git a/llvm/test/Transforms/EarlyCSE/AArch64/ldstN.ll b/llvm/test/Transforms/EarlyCSE/AArch64/ldstN.ll
index 16a5f07684c9a..71ed2915f48b8 100644
--- a/llvm/test/Transforms/EarlyCSE/AArch64/ldstN.ll
+++ b/llvm/test/Transforms/EarlyCSE/AArch64/ldstN.ll
@@ -3,14 +3,14 @@
 target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
 target triple = "aarch64--linux-gnu"
 
-declare { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld4.v4i16.p0v4i16(<4 x i16>*)
+declare { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld4.v4i16.p0(ptr)
 
 ; Although the store and the ld4 are using the same pointer, the
 ; data can not be reused because ld4 accesses multiple elements.
 define { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @foo() {
 entry:
-  store <4 x i16> undef, <4 x i16>* undef, align 8
-  %0 = call { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld4.v4i16.p0v4i16(<4 x i16>* undef)
+  store <4 x i16> undef, ptr undef, align 8
+  %0 = call { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld4.v4i16.p0(ptr undef)
   ret { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } %0
 ; CHECK-LABEL: @foo(
 ; CHECK: store

diff  --git a/llvm/test/Transforms/EarlyCSE/AMDGPU/intrinsics.ll b/llvm/test/Transforms/EarlyCSE/AMDGPU/intrinsics.ll
index 9333edab051b0..fbd9e77f16f4d 100644
--- a/llvm/test/Transforms/EarlyCSE/AMDGPU/intrinsics.ll
+++ b/llvm/test/Transforms/EarlyCSE/AMDGPU/intrinsics.ll
@@ -3,33 +3,33 @@
 ; CHECK-LABEL: @no_cse
 ; CHECK: call i32 @llvm.amdgcn.s.buffer.load.i32(<4 x i32> %in, i32 0, i32 0)
 ; CHECK: call i32 @llvm.amdgcn.s.buffer.load.i32(<4 x i32> %in, i32 4, i32 0)
-define void @no_cse(i32 addrspace(1)* %out, <4 x i32> %in) {
+define void @no_cse(ptr addrspace(1) %out, <4 x i32> %in) {
   %a = call i32 @llvm.amdgcn.s.buffer.load.i32(<4 x i32> %in, i32 0, i32 0)
   %b = call i32 @llvm.amdgcn.s.buffer.load.i32(<4 x i32> %in, i32 4, i32 0)
   %c = add i32 %a, %b
-  store i32 %c, i32 addrspace(1)* %out
+  store i32 %c, ptr addrspace(1) %out
   ret void
 }
 
 ; CHECK-LABEL: @cse_zero_offset
 ; CHECK: [[CSE:%[a-z0-9A-Z]+]] = call i32 @llvm.amdgcn.s.buffer.load.i32(<4 x i32> %in, i32 0, i32 0)
 ; CHECK: add i32 [[CSE]], [[CSE]]
-define void @cse_zero_offset(i32 addrspace(1)* %out, <4 x i32> %in) {
+define void @cse_zero_offset(ptr addrspace(1) %out, <4 x i32> %in) {
   %a = call i32 @llvm.amdgcn.s.buffer.load.i32(<4 x i32> %in, i32 0, i32 0)
   %b = call i32 @llvm.amdgcn.s.buffer.load.i32(<4 x i32> %in, i32 0, i32 0)
   %c = add i32 %a, %b
-  store i32 %c, i32 addrspace(1)* %out
+  store i32 %c, ptr addrspace(1) %out
   ret void
 }
 
 ; CHECK-LABEL: @cse_nonzero_offset
 ; CHECK: [[CSE:%[a-z0-9A-Z]+]] = call i32 @llvm.amdgcn.s.buffer.load.i32(<4 x i32> %in, i32 4, i32 0)
 ; CHECK: add i32 [[CSE]], [[CSE]]
-define void @cse_nonzero_offset(i32 addrspace(1)* %out, <4 x i32> %in) {
+define void @cse_nonzero_offset(ptr addrspace(1) %out, <4 x i32> %in) {
   %a = call i32 @llvm.amdgcn.s.buffer.load.i32(<4 x i32> %in, i32 4, i32 0)
   %b = call i32 @llvm.amdgcn.s.buffer.load.i32(<4 x i32> %in, i32 4, i32 0)
   %c = add i32 %a, %b
-  store i32 %c, i32 addrspace(1)* %out
+  store i32 %c, ptr addrspace(1) %out
   ret void
 }
 

diff  --git a/llvm/test/Transforms/EarlyCSE/PowerPC/read-reg.ll b/llvm/test/Transforms/EarlyCSE/PowerPC/read-reg.ll
index 9beb3b47c6a18..e57dd51cb56fa 100644
--- a/llvm/test/Transforms/EarlyCSE/PowerPC/read-reg.ll
+++ b/llvm/test/Transforms/EarlyCSE/PowerPC/read-reg.ll
@@ -7,7 +7,7 @@ target triple = "powerpc64-unknown-linux-gnu"
 define i64 @f(i64 %x) #0 {
 entry:
   %0 = call i64 @llvm.read_register.i64(metadata !0)
-  call void bitcast (void (...)* @foo to void ()*)()
+  call void @foo()
   %1 = call i64 @llvm.read_register.i64(metadata !0)
   %add = add nsw i64 %0, %1
   ret i64 %add

diff  --git a/llvm/test/Transforms/EarlyCSE/X86/preserve_memoryssa.ll b/llvm/test/Transforms/EarlyCSE/X86/preserve_memoryssa.ll
index ac9e80d9c6e45..2ac895b09e712 100644
--- a/llvm/test/Transforms/EarlyCSE/X86/preserve_memoryssa.ll
+++ b/llvm/test/Transforms/EarlyCSE/X86/preserve_memoryssa.ll
@@ -14,22 +14,22 @@ target triple = "x86_64-unknown-linux-gnu"
 ; it claims. Note that if we replace the GEP indices 2 and 1, AA sees NoAlias
 ; for the last load, before CSE-ing the first 2 loads.
 %struct.ImageParameters = type { i32, i32, i32 }
-@img = external global %struct.ImageParameters*, align 8
+@img = external global ptr, align 8
 define void @test1_macroblock() {
 entry:
   ; MemoryUse(LoE)
-  %0 = load %struct.ImageParameters*, %struct.ImageParameters** @img, align 8
+  %0 = load ptr, ptr @img, align 8
 
-  %Pos_2 = getelementptr inbounds %struct.ImageParameters, %struct.ImageParameters* %0, i64 0, i32 2
+  %Pos_2 = getelementptr inbounds %struct.ImageParameters, ptr %0, i64 0, i32 2
   ; 1 = MemoryDef(LoE)
-  store i32 undef, i32* %Pos_2, align 8
+  store i32 undef, ptr %Pos_2, align 8
 
   ; MemoryUse(LoE)
-  %1 = load %struct.ImageParameters*, %struct.ImageParameters** @img, align 8
+  %1 = load ptr, ptr @img, align 8
 
-  %Pos_1 = getelementptr inbounds %struct.ImageParameters, %struct.ImageParameters* %1, i64 0, i32 1
+  %Pos_1 = getelementptr inbounds %struct.ImageParameters, ptr %1, i64 0, i32 1
   ; MemoryUse(1) MayAlias
-  %2 = load i32, i32* %Pos_1, align 4
+  %2 = load i32, ptr %Pos_1, align 4
   unreachable
 }
 
@@ -38,14 +38,14 @@ entry:
 ; undef they are NoAlias. The Use can be optimized further to LoE. We can
 ; de-optimize uses of replaced instructions, but in general this is not enough
 ; (see next tests).
-%struct.TermS = type { i32, i32, i32, i32, i32, i8* }
+%struct.TermS = type { i32, i32, i32, i32, i32, ptr }
 define fastcc void @test2_term_string() {
 entry:
-  %string = getelementptr inbounds %struct.TermS, %struct.TermS* undef, i64 0, i32 5
+  %string = getelementptr inbounds %struct.TermS, ptr undef, i64 0, i32 5
   ; 1 = MemoryDef(LoE)
-  store i8* undef, i8** %string, align 8
+  store ptr undef, ptr %string, align 8
   ; MemoryUse(1) MustAlias
-  %0 = load i8*, i8** %string, align 8
+  %0 = load ptr, ptr %string, align 8
   unreachable
 }
 
@@ -55,22 +55,22 @@ entry:
 ; When replacing instructions, we can deoptimize all uses of the replaced
 ; instruction and all uses of transitive accesses. However this does not stop
 ; MemorySSA from being tripped by AA (see test4).
-%struct.Grammar = type { i8*, i8*, %struct.anon }
-%struct.anon = type { i32, i32, %struct.Term**, [3 x %struct.Term*] }
+%struct.Grammar = type { ptr, ptr, %struct.anon }
+%struct.anon = type { i32, i32, ptr, [3 x ptr] }
 %struct.Term = type { i32 }
 
-define fastcc void @test3_term_string(%struct.Grammar* %g) {
+define fastcc void @test3_term_string(ptr %g) {
 entry:
   ; 1 = MemoryDef(LoE)
-  store i8* undef, i8** undef, align 8
+  store ptr undef, ptr undef, align 8
   ; MemoryUse(LoE)
-  %0 = load i8*, i8** undef, align 8
-  %arrayidx = getelementptr inbounds i8, i8* %0, i64 undef
+  %0 = load ptr, ptr undef, align 8
+  %arrayidx = getelementptr inbounds i8, ptr %0, i64 undef
   ; 2 = MemoryDef(1)
-  store i8 0, i8* %arrayidx, align 1
-  %v = getelementptr inbounds %struct.Grammar, %struct.Grammar* %g, i64 0, i32 2, i32 2
+  store i8 0, ptr %arrayidx, align 1
+  %v = getelementptr inbounds %struct.Grammar, ptr %g, i64 0, i32 2, i32 2
   ; MemoryUse(2) MayAlias
-  %1 = load %struct.Term**, %struct.Term*** %v, align 8
+  %1 = load ptr, ptr %v, align 8
   unreachable
 }
 
@@ -86,8 +86,8 @@ entry:
 ; for the updated IR) is to recompute it from scratch. What we get now is still
 ; a correct update, but with accesses that claim to be optimized and can be
 ; optimized further if we were to re-run MemorySSA on the IR.
-%struct.gnode.0.1.3.6.9.18.20.79 = type { i32, i32, i32, i32, i32, i32, i32, %struct.gnode.0.1.3.6.9.18.20.79* }
-@gnodeArray = external global %struct.gnode.0.1.3.6.9.18.20.79**, align 8
+%struct.gnode.0.1.3.6.9.18.20.79 = type { i32, i32, i32, i32, i32, i32, i32, ptr }
+@gnodeArray = external global ptr, align 8
 
 define void @test4_shortest() {
 entry:
@@ -95,43 +95,43 @@ entry:
   br i1 undef, label %if.then274, label %for.cond404
 
 if.then274:                                       ; preds = %if.end256
-  %0 = bitcast [5 x i32]* %exl.i to i8*
-  %arrayidx.i = getelementptr inbounds [5 x i32], [5 x i32]* %exl.i, i64 0, i64 1
-  %arrayidx1.i = getelementptr inbounds [5 x i32], [5 x i32]* %exl.i, i64 0, i64 2
-  %arrayidx2.i = getelementptr inbounds [5 x i32], [5 x i32]* %exl.i, i64 0, i64 3
-  %arrayidx3.i = getelementptr inbounds [5 x i32], [5 x i32]* %exl.i, i64 0, i64 4
-  %1 = bitcast [5 x i32]* %exl.i to i8*
-  %arrayidx.i1034 = getelementptr inbounds [5 x i32], [5 x i32]* %exl.i, i64 0, i64 1
-  %arrayidx1.i1035 = getelementptr inbounds [5 x i32], [5 x i32]* %exl.i, i64 0, i64 2
-  %arrayidx2.i1036 = getelementptr inbounds [5 x i32], [5 x i32]* %exl.i, i64 0, i64 3
-  %arrayidx3.i1037 = getelementptr inbounds [5 x i32], [5 x i32]* %exl.i, i64 0, i64 4
+  %0 = bitcast ptr %exl.i to ptr
+  %arrayidx.i = getelementptr inbounds [5 x i32], ptr %exl.i, i64 0, i64 1
+  %arrayidx1.i = getelementptr inbounds [5 x i32], ptr %exl.i, i64 0, i64 2
+  %arrayidx2.i = getelementptr inbounds [5 x i32], ptr %exl.i, i64 0, i64 3
+  %arrayidx3.i = getelementptr inbounds [5 x i32], ptr %exl.i, i64 0, i64 4
+  %1 = bitcast ptr %exl.i to ptr
+  %arrayidx.i1034 = getelementptr inbounds [5 x i32], ptr %exl.i, i64 0, i64 1
+  %arrayidx1.i1035 = getelementptr inbounds [5 x i32], ptr %exl.i, i64 0, i64 2
+  %arrayidx2.i1036 = getelementptr inbounds [5 x i32], ptr %exl.i, i64 0, i64 3
+  %arrayidx3.i1037 = getelementptr inbounds [5 x i32], ptr %exl.i, i64 0, i64 4
   unreachable
 
 for.cond404:                                      ; preds = %if.end256
-  %2 = bitcast [5 x i32]* %exl.i to i8*
-  %arrayidx.i960 = getelementptr inbounds [5 x i32], [5 x i32]* %exl.i, i64 0, i64 1
-  %arrayidx1.i961 = getelementptr inbounds [5 x i32], [5 x i32]* %exl.i, i64 0, i64 2
-  %arrayidx2.i962 = getelementptr inbounds [5 x i32], [5 x i32]* %exl.i, i64 0, i64 3
+  %2 = bitcast ptr %exl.i to ptr
+  %arrayidx.i960 = getelementptr inbounds [5 x i32], ptr %exl.i, i64 0, i64 1
+  %arrayidx1.i961 = getelementptr inbounds [5 x i32], ptr %exl.i, i64 0, i64 2
+  %arrayidx2.i962 = getelementptr inbounds [5 x i32], ptr %exl.i, i64 0, i64 3
   ; 1 = MemoryDef(LoE)
-  store i32 undef, i32* %arrayidx2.i962, align 4
-  %arrayidx3.i963 = getelementptr inbounds [5 x i32], [5 x i32]* %exl.i, i64 0, i64 4
+  store i32 undef, ptr %arrayidx2.i962, align 4
+  %arrayidx3.i963 = getelementptr inbounds [5 x i32], ptr %exl.i, i64 0, i64 4
 
   ; MemoryUse(LoE)
-  %3 = load %struct.gnode.0.1.3.6.9.18.20.79**, %struct.gnode.0.1.3.6.9.18.20.79*** @gnodeArray, align 8
-  %arrayidx6.i968 = getelementptr inbounds %struct.gnode.0.1.3.6.9.18.20.79*, %struct.gnode.0.1.3.6.9.18.20.79** %3, i64 undef
+  %3 = load ptr, ptr @gnodeArray, align 8
+  %arrayidx6.i968 = getelementptr inbounds ptr, ptr %3, i64 undef
   ; MemoryUse(1) MayAlias
-  %4 = load %struct.gnode.0.1.3.6.9.18.20.79*, %struct.gnode.0.1.3.6.9.18.20.79** %arrayidx6.i968, align 8
+  %4 = load ptr, ptr %arrayidx6.i968, align 8
   br i1 undef, label %for.cond26.preheader.i974, label %if.then20.for.body_crit_edge.i999
 
 for.cond26.preheader.i974:                        ; preds = %if.then20.i996
-  %5 = bitcast [5 x i32]* %exl.i to i8*
-  %arrayidx.i924 = getelementptr inbounds [5 x i32], [5 x i32]* %exl.i, i64 0, i64 1
-  %arrayidx1.i925 = getelementptr inbounds [5 x i32], [5 x i32]* %exl.i, i64 0, i64 2
-  %arrayidx2.i926 = getelementptr inbounds [5 x i32], [5 x i32]* %exl.i, i64 0, i64 3
-  %arrayidx3.i927 = getelementptr inbounds [5 x i32], [5 x i32]* %exl.i, i64 0, i64 4
+  %5 = bitcast ptr %exl.i to ptr
+  %arrayidx.i924 = getelementptr inbounds [5 x i32], ptr %exl.i, i64 0, i64 1
+  %arrayidx1.i925 = getelementptr inbounds [5 x i32], ptr %exl.i, i64 0, i64 2
+  %arrayidx2.i926 = getelementptr inbounds [5 x i32], ptr %exl.i, i64 0, i64 3
+  %arrayidx3.i927 = getelementptr inbounds [5 x i32], ptr %exl.i, i64 0, i64 4
   unreachable
 
 if.then20.for.body_crit_edge.i999:                ; preds = %if.then20.i996
-  %arrayidx9.phi.trans.insert.i997 = getelementptr inbounds [5 x i32], [5 x i32]* %exl.i, i64 0, i64 undef
+  %arrayidx9.phi.trans.insert.i997 = getelementptr inbounds [5 x i32], ptr %exl.i, i64 0, i64 undef
   unreachable
 }

diff  --git a/llvm/test/Transforms/EarlyCSE/atomics.ll b/llvm/test/Transforms/EarlyCSE/atomics.ll
index 4d67858237bc9..34293de037ad4 100644
--- a/llvm/test/Transforms/EarlyCSE/atomics.ll
+++ b/llvm/test/Transforms/EarlyCSE/atomics.ll
@@ -2,56 +2,56 @@
 ; RUN: opt < %s -S -early-cse -earlycse-debug-hash | FileCheck %s
 ; RUN: opt < %s -S -basic-aa -early-cse-memssa | FileCheck %s
 
-define i32 @test12(i1 %B, i32* %P1, i32* %P2) {
+define i32 @test12(i1 %B, ptr %P1, ptr %P2) {
 ; CHECK-LABEL: @test12(
-; CHECK-NEXT:    [[LOAD0:%.*]] = load i32, i32* [[P1:%.*]], align 4
-; CHECK-NEXT:    [[TMP1:%.*]] = load atomic i32, i32* [[P2:%.*]] seq_cst, align 4
-; CHECK-NEXT:    [[LOAD1:%.*]] = load i32, i32* [[P1]], align 4
+; CHECK-NEXT:    [[LOAD0:%.*]] = load i32, ptr [[P1:%.*]], align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = load atomic i32, ptr [[P2:%.*]] seq_cst, align 4
+; CHECK-NEXT:    [[LOAD1:%.*]] = load i32, ptr [[P1]], align 4
 ; CHECK-NEXT:    [[SEL:%.*]] = select i1 [[B:%.*]], i32 [[LOAD0]], i32 [[LOAD1]]
 ; CHECK-NEXT:    ret i32 [[SEL]]
 ;
-  %load0 = load i32, i32* %P1
-  %1 = load atomic i32, i32* %P2 seq_cst, align 4
-  %load1 = load i32, i32* %P1
+  %load0 = load i32, ptr %P1
+  %1 = load atomic i32, ptr %P2 seq_cst, align 4
+  %load1 = load i32, ptr %P1
   %sel = select i1 %B, i32 %load0, i32 %load1
   ret i32 %sel
 }
 
 ; atomic to non-atomic forwarding is legal
-define i32 @test13(i1 %B, i32* %P1) {
+define i32 @test13(i1 %B, ptr %P1) {
 ; CHECK-LABEL: @test13(
-; CHECK-NEXT:    [[A:%.*]] = load atomic i32, i32* [[P1:%.*]] seq_cst, align 4
+; CHECK-NEXT:    [[A:%.*]] = load atomic i32, ptr [[P1:%.*]] seq_cst, align 4
 ; CHECK-NEXT:    ret i32 0
 ;
-  %a = load atomic i32, i32* %P1 seq_cst, align 4
-  %b = load i32, i32* %P1
+  %a = load atomic i32, ptr %P1 seq_cst, align 4
+  %b = load i32, ptr %P1
   %res = sub i32 %a, %b
   ret i32 %res
 }
 
 ; atomic to unordered atomic forwarding is legal
-define i32 @test14(i1 %B, i32* %P1) {
+define i32 @test14(i1 %B, ptr %P1) {
 ; CHECK-LABEL: @test14(
-; CHECK-NEXT:    [[A:%.*]] = load atomic i32, i32* [[P1:%.*]] seq_cst, align 4
+; CHECK-NEXT:    [[A:%.*]] = load atomic i32, ptr [[P1:%.*]] seq_cst, align 4
 ; CHECK-NEXT:    ret i32 0
 ;
-  %a = load atomic i32, i32* %P1 seq_cst, align 4
-  %b = load atomic i32, i32* %P1 unordered, align 4
+  %a = load atomic i32, ptr %P1 seq_cst, align 4
+  %b = load atomic i32, ptr %P1 unordered, align 4
   %res = sub i32 %a, %b
   ret i32 %res
 }
 
 ; implementation restriction: can't forward to stonger
 ; than unordered
-define i32 @test15(i1 %B, i32* %P1, i32* %P2) {
+define i32 @test15(i1 %B, ptr %P1, ptr %P2) {
 ; CHECK-LABEL: @test15(
-; CHECK-NEXT:    [[A:%.*]] = load atomic i32, i32* [[P1:%.*]] seq_cst, align 4
-; CHECK-NEXT:    [[B:%.*]] = load atomic i32, i32* [[P1]] seq_cst, align 4
+; CHECK-NEXT:    [[A:%.*]] = load atomic i32, ptr [[P1:%.*]] seq_cst, align 4
+; CHECK-NEXT:    [[B:%.*]] = load atomic i32, ptr [[P1]] seq_cst, align 4
 ; CHECK-NEXT:    [[RES:%.*]] = sub i32 [[A]], [[B]]
 ; CHECK-NEXT:    ret i32 [[RES]]
 ;
-  %a = load atomic i32, i32* %P1 seq_cst, align 4
-  %b = load atomic i32, i32* %P1 seq_cst, align 4
+  %a = load atomic i32, ptr %P1 seq_cst, align 4
+  %b = load atomic i32, ptr %P1 seq_cst, align 4
   %res = sub i32 %a, %b
   ret i32 %res
 }
@@ -60,247 +60,247 @@ define i32 @test15(i1 %B, i32* %P1, i32* %P2) {
 ; it would be legal to use the later value in place of the
 ; former in this particular example.  We just don't
 ; do that right now.)
-define i32 @test16(i1 %B, i32* %P1, i32* %P2) {
+define i32 @test16(i1 %B, ptr %P1, ptr %P2) {
 ; CHECK-LABEL: @test16(
-; CHECK-NEXT:    [[A:%.*]] = load i32, i32* [[P1:%.*]], align 4
-; CHECK-NEXT:    [[B:%.*]] = load atomic i32, i32* [[P1]] unordered, align 4
+; CHECK-NEXT:    [[A:%.*]] = load i32, ptr [[P1:%.*]], align 4
+; CHECK-NEXT:    [[B:%.*]] = load atomic i32, ptr [[P1]] unordered, align 4
 ; CHECK-NEXT:    [[RES:%.*]] = sub i32 [[A]], [[B]]
 ; CHECK-NEXT:    ret i32 [[RES]]
 ;
-  %a = load i32, i32* %P1, align 4
-  %b = load atomic i32, i32* %P1 unordered, align 4
+  %a = load i32, ptr %P1, align 4
+  %b = load atomic i32, ptr %P1 unordered, align 4
   %res = sub i32 %a, %b
   ret i32 %res
 }
 
 ; Can't DSE across a full fence
-define void @fence_seq_cst_store(i1 %B, i32* %P1, i32* %P2) {
+define void @fence_seq_cst_store(i1 %B, ptr %P1, ptr %P2) {
 ; CHECK-LABEL: @fence_seq_cst_store(
-; CHECK-NEXT:    store i32 0, i32* [[P1:%.*]], align 4
-; CHECK-NEXT:    store atomic i32 0, i32* [[P2:%.*]] seq_cst, align 4
-; CHECK-NEXT:    store i32 0, i32* [[P1]], align 4
+; CHECK-NEXT:    store i32 0, ptr [[P1:%.*]], align 4
+; CHECK-NEXT:    store atomic i32 0, ptr [[P2:%.*]] seq_cst, align 4
+; CHECK-NEXT:    store i32 0, ptr [[P1]], align 4
 ; CHECK-NEXT:    ret void
 ;
-  store i32 0, i32* %P1, align 4
-  store atomic i32 0, i32* %P2 seq_cst, align 4
-  store i32 0, i32* %P1, align 4
+  store i32 0, ptr %P1, align 4
+  store atomic i32 0, ptr %P2 seq_cst, align 4
+  store i32 0, ptr %P1, align 4
   ret void
 }
 
 ; Can't DSE across a full fence
-define void @fence_seq_cst(i1 %B, i32* %P1, i32* %P2) {
+define void @fence_seq_cst(i1 %B, ptr %P1, ptr %P2) {
 ; CHECK-LABEL: @fence_seq_cst(
-; CHECK-NEXT:    store i32 0, i32* [[P1:%.*]], align 4
+; CHECK-NEXT:    store i32 0, ptr [[P1:%.*]], align 4
 ; CHECK-NEXT:    fence seq_cst
-; CHECK-NEXT:    store i32 0, i32* [[P1]], align 4
+; CHECK-NEXT:    store i32 0, ptr [[P1]], align 4
 ; CHECK-NEXT:    ret void
 ;
-  store i32 0, i32* %P1, align 4
+  store i32 0, ptr %P1, align 4
   fence seq_cst
-  store i32 0, i32* %P1, align 4
+  store i32 0, ptr %P1, align 4
   ret void
 }
 
 ; Can't DSE across a full fence
-define void @fence_asm_sideeffect(i1 %B, i32* %P1, i32* %P2) {
+define void @fence_asm_sideeffect(i1 %B, ptr %P1, ptr %P2) {
 ; CHECK-LABEL: @fence_asm_sideeffect(
-; CHECK-NEXT:    store i32 0, i32* [[P1:%.*]], align 4
+; CHECK-NEXT:    store i32 0, ptr [[P1:%.*]], align 4
 ; CHECK-NEXT:    call void asm sideeffect "", ""()
-; CHECK-NEXT:    store i32 0, i32* [[P1]], align 4
+; CHECK-NEXT:    store i32 0, ptr [[P1]], align 4
 ; CHECK-NEXT:    ret void
 ;
-  store i32 0, i32* %P1, align 4
+  store i32 0, ptr %P1, align 4
   call void asm sideeffect "", ""()
-  store i32 0, i32* %P1, align 4
+  store i32 0, ptr %P1, align 4
   ret void
 }
 
 ; Can't DSE across a full fence
-define void @fence_asm_memory(i1 %B, i32* %P1, i32* %P2) {
+define void @fence_asm_memory(i1 %B, ptr %P1, ptr %P2) {
 ; CHECK-LABEL: @fence_asm_memory(
-; CHECK-NEXT:    store i32 0, i32* [[P1:%.*]], align 4
+; CHECK-NEXT:    store i32 0, ptr [[P1:%.*]], align 4
 ; CHECK-NEXT:    call void asm "", "~{memory}"()
-; CHECK-NEXT:    store i32 0, i32* [[P1]], align 4
+; CHECK-NEXT:    store i32 0, ptr [[P1]], align 4
 ; CHECK-NEXT:    ret void
 ;
-  store i32 0, i32* %P1, align 4
+  store i32 0, ptr %P1, align 4
   call void asm "", "~{memory}"()
-  store i32 0, i32* %P1, align 4
+  store i32 0, ptr %P1, align 4
   ret void
 }
 
 ; Can't remove a volatile load
-define i32 @volatile_load(i1 %B, i32* %P1, i32* %P2) {
+define i32 @volatile_load(i1 %B, ptr %P1, ptr %P2) {
 ; CHECK-LABEL: @volatile_load(
-; CHECK-NEXT:    [[A:%.*]] = load i32, i32* [[P1:%.*]], align 4
-; CHECK-NEXT:    [[B:%.*]] = load volatile i32, i32* [[P1]], align 4
+; CHECK-NEXT:    [[A:%.*]] = load i32, ptr [[P1:%.*]], align 4
+; CHECK-NEXT:    [[B:%.*]] = load volatile i32, ptr [[P1]], align 4
 ; CHECK-NEXT:    [[RES:%.*]] = sub i32 [[A]], [[B]]
 ; CHECK-NEXT:    ret i32 [[RES]]
 ;
-  %a = load i32, i32* %P1, align 4
-  %b = load volatile i32, i32* %P1, align 4
+  %a = load i32, ptr %P1, align 4
+  %b = load volatile i32, ptr %P1, align 4
   %res = sub i32 %a, %b
   ret i32 %res
 }
 
 ; Can't remove redundant volatile loads
-define i32 @redundant_volatile_load(i1 %B, i32* %P1, i32* %P2) {
+define i32 @redundant_volatile_load(i1 %B, ptr %P1, ptr %P2) {
 ; CHECK-LABEL: @redundant_volatile_load(
-; CHECK-NEXT:    [[A:%.*]] = load volatile i32, i32* [[P1:%.*]], align 4
-; CHECK-NEXT:    [[B:%.*]] = load volatile i32, i32* [[P1]], align 4
+; CHECK-NEXT:    [[A:%.*]] = load volatile i32, ptr [[P1:%.*]], align 4
+; CHECK-NEXT:    [[B:%.*]] = load volatile i32, ptr [[P1]], align 4
 ; CHECK-NEXT:    [[RES:%.*]] = sub i32 [[A]], [[B]]
 ; CHECK-NEXT:    ret i32 [[RES]]
 ;
-  %a = load volatile i32, i32* %P1, align 4
-  %b = load volatile i32, i32* %P1, align 4
+  %a = load volatile i32, ptr %P1, align 4
+  %b = load volatile i32, ptr %P1, align 4
   %res = sub i32 %a, %b
   ret i32 %res
 }
 
 ; Can't DSE a volatile store
-define void @volatile_store(i1 %B, i32* %P1, i32* %P2) {
+define void @volatile_store(i1 %B, ptr %P1, ptr %P2) {
 ; CHECK-LABEL: @volatile_store(
-; CHECK-NEXT:    store volatile i32 0, i32* [[P1:%.*]], align 4
-; CHECK-NEXT:    store i32 3, i32* [[P1]], align 4
+; CHECK-NEXT:    store volatile i32 0, ptr [[P1:%.*]], align 4
+; CHECK-NEXT:    store i32 3, ptr [[P1]], align 4
 ; CHECK-NEXT:    ret void
 ;
-  store volatile i32 0, i32* %P1, align 4
-  store i32 3, i32* %P1, align 4
+  store volatile i32 0, ptr %P1, align 4
+  store i32 3, ptr %P1, align 4
   ret void
 }
 
 ; Can't DSE a redundant volatile store
-define void @redundant_volatile_store(i1 %B, i32* %P1, i32* %P2) {
+define void @redundant_volatile_store(i1 %B, ptr %P1, ptr %P2) {
 ; CHECK-LABEL: @redundant_volatile_store(
-; CHECK-NEXT:    store volatile i32 0, i32* [[P1:%.*]], align 4
-; CHECK-NEXT:    store volatile i32 0, i32* [[P1]], align 4
+; CHECK-NEXT:    store volatile i32 0, ptr [[P1:%.*]], align 4
+; CHECK-NEXT:    store volatile i32 0, ptr [[P1]], align 4
 ; CHECK-NEXT:    ret void
 ;
-  store volatile i32 0, i32* %P1, align 4
-  store volatile i32 0, i32* %P1, align 4
+  store volatile i32 0, ptr %P1, align 4
+  store volatile i32 0, ptr %P1, align 4
   ret void
 }
 
 ; Can value forward from volatiles
-define i32 @test20(i1 %B, i32* %P1, i32* %P2) {
+define i32 @test20(i1 %B, ptr %P1, ptr %P2) {
 ; CHECK-LABEL: @test20(
-; CHECK-NEXT:    [[A:%.*]] = load volatile i32, i32* [[P1:%.*]], align 4
+; CHECK-NEXT:    [[A:%.*]] = load volatile i32, ptr [[P1:%.*]], align 4
 ; CHECK-NEXT:    ret i32 0
 ;
-  %a = load volatile i32, i32* %P1, align 4
-  %b = load i32, i32* %P1, align 4
+  %a = load volatile i32, ptr %P1, align 4
+  %b = load i32, ptr %P1, align 4
   %res = sub i32 %a, %b
   ret i32 %res
 }
 
 ; Can DSE a non-volatile store in favor of a volatile one
 ; currently a missed optimization
-define void @test21(i1 %B, i32* %P1, i32* %P2) {
+define void @test21(i1 %B, ptr %P1, ptr %P2) {
 ; CHECK-LABEL: @test21(
-; CHECK-NEXT:    store i32 0, i32* [[P1:%.*]], align 4
-; CHECK-NEXT:    store volatile i32 3, i32* [[P1]], align 4
+; CHECK-NEXT:    store i32 0, ptr [[P1:%.*]], align 4
+; CHECK-NEXT:    store volatile i32 3, ptr [[P1]], align 4
 ; CHECK-NEXT:    ret void
 ;
-  store i32 0, i32* %P1, align 4
-  store volatile i32 3, i32* %P1, align 4
+  store i32 0, ptr %P1, align 4
+  store volatile i32 3, ptr %P1, align 4
   ret void
 }
 
 ; Can DSE a normal store in favor of a unordered one
-define void @test22(i1 %B, i32* %P1, i32* %P2) {
+define void @test22(i1 %B, ptr %P1, ptr %P2) {
 ; CHECK-LABEL: @test22(
-; CHECK-NEXT:    store atomic i32 3, i32* [[P1:%.*]] unordered, align 4
+; CHECK-NEXT:    store atomic i32 3, ptr [[P1:%.*]] unordered, align 4
 ; CHECK-NEXT:    ret void
 ;
-  store i32 0, i32* %P1, align 4
-  store atomic i32 3, i32* %P1 unordered, align 4
+  store i32 0, ptr %P1, align 4
+  store atomic i32 3, ptr %P1 unordered, align 4
   ret void
 }
 
 ; Can also DSE a unordered store in favor of a normal one
-define void @test23(i1 %B, i32* %P1, i32* %P2) {
+define void @test23(i1 %B, ptr %P1, ptr %P2) {
 ; CHECK-LABEL: @test23(
-; CHECK-NEXT:    store i32 0, i32* [[P1:%.*]], align 4
+; CHECK-NEXT:    store i32 0, ptr [[P1:%.*]], align 4
 ; CHECK-NEXT:    ret void
 ;
-  store atomic i32 3, i32* %P1 unordered, align 4
-  store i32 0, i32* %P1, align 4
+  store atomic i32 3, ptr %P1 unordered, align 4
+  store i32 0, ptr %P1, align 4
   ret void
 }
 
 ; As an implementation limitation, can't remove ordered stores
 ; Note that we could remove the earlier store if we could
 ; represent the required ordering.
-define void @test24(i1 %B, i32* %P1, i32* %P2) {
+define void @test24(i1 %B, ptr %P1, ptr %P2) {
 ; CHECK-LABEL: @test24(
-; CHECK-NEXT:    store atomic i32 3, i32* [[P1:%.*]] release, align 4
-; CHECK-NEXT:    store i32 0, i32* [[P1]], align 4
+; CHECK-NEXT:    store atomic i32 3, ptr [[P1:%.*]] release, align 4
+; CHECK-NEXT:    store i32 0, ptr [[P1]], align 4
 ; CHECK-NEXT:    ret void
 ;
-  store atomic i32 3, i32* %P1 release, align 4
-  store i32 0, i32* %P1, align 4
+  store atomic i32 3, ptr %P1 release, align 4
+  store i32 0, ptr %P1, align 4
   ret void
 }
 
 ; Can't remove volatile stores - each is independently observable and
 ; the count of such stores is an observable program side effect.
-define void @test25(i1 %B, i32* %P1, i32* %P2) {
+define void @test25(i1 %B, ptr %P1, ptr %P2) {
 ; CHECK-LABEL: @test25(
-; CHECK-NEXT:    store volatile i32 3, i32* [[P1:%.*]], align 4
-; CHECK-NEXT:    store volatile i32 0, i32* [[P1]], align 4
+; CHECK-NEXT:    store volatile i32 3, ptr [[P1:%.*]], align 4
+; CHECK-NEXT:    store volatile i32 0, ptr [[P1]], align 4
 ; CHECK-NEXT:    ret void
 ;
-  store volatile i32 3, i32* %P1, align 4
-  store volatile i32 0, i32* %P1, align 4
+  store volatile i32 3, ptr %P1, align 4
+  store volatile i32 0, ptr %P1, align 4
   ret void
 }
 
 ; Can DSE a unordered store in favor of a unordered one
-define void @test26(i1 %B, i32* %P1, i32* %P2) {
+define void @test26(i1 %B, ptr %P1, ptr %P2) {
 ; CHECK-LABEL: @test26(
-; CHECK-NEXT:    store atomic i32 3, i32* [[P1:%.*]] unordered, align 4
+; CHECK-NEXT:    store atomic i32 3, ptr [[P1:%.*]] unordered, align 4
 ; CHECK-NEXT:    ret void
 ;
-  store atomic i32 0, i32* %P1 unordered, align 4
-  store atomic i32 3, i32* %P1 unordered, align 4
+  store atomic i32 0, ptr %P1 unordered, align 4
+  store atomic i32 3, ptr %P1 unordered, align 4
   ret void
 }
 
 ; Can DSE a unordered store in favor of a ordered one,
 ; but current don't due to implementation limits
-define void @test27(i1 %B, i32* %P1, i32* %P2) {
+define void @test27(i1 %B, ptr %P1, ptr %P2) {
 ; CHECK-LABEL: @test27(
-; CHECK-NEXT:    store atomic i32 0, i32* [[P1:%.*]] unordered, align 4
-; CHECK-NEXT:    store atomic i32 3, i32* [[P1]] release, align 4
+; CHECK-NEXT:    store atomic i32 0, ptr [[P1:%.*]] unordered, align 4
+; CHECK-NEXT:    store atomic i32 3, ptr [[P1]] release, align 4
 ; CHECK-NEXT:    ret void
 ;
-  store atomic i32 0, i32* %P1 unordered, align 4
-  store atomic i32 3, i32* %P1 release, align 4
+  store atomic i32 0, ptr %P1 unordered, align 4
+  store atomic i32 3, ptr %P1 release, align 4
   ret void
 }
 
 ; Can DSE an unordered atomic store in favor of an
 ; ordered one, but current don't due to implementation limits
-define void @test28(i1 %B, i32* %P1, i32* %P2) {
+define void @test28(i1 %B, ptr %P1, ptr %P2) {
 ; CHECK-LABEL: @test28(
-; CHECK-NEXT:    store atomic i32 0, i32* [[P1:%.*]] unordered, align 4
-; CHECK-NEXT:    store atomic i32 3, i32* [[P1]] release, align 4
+; CHECK-NEXT:    store atomic i32 0, ptr [[P1:%.*]] unordered, align 4
+; CHECK-NEXT:    store atomic i32 3, ptr [[P1]] release, align 4
 ; CHECK-NEXT:    ret void
 ;
-  store atomic i32 0, i32* %P1 unordered, align 4
-  store atomic i32 3, i32* %P1 release, align 4
+  store atomic i32 0, ptr %P1 unordered, align 4
+  store atomic i32 3, ptr %P1 release, align 4
   ret void
 }
 
 ; As an implementation limitation, can't remove ordered stores
 ; see also: @test24
-define void @test29(i1 %B, i32* %P1, i32* %P2) {
+define void @test29(i1 %B, ptr %P1, ptr %P2) {
 ; CHECK-LABEL: @test29(
-; CHECK-NEXT:    store atomic i32 3, i32* [[P1:%.*]] release, align 4
-; CHECK-NEXT:    store atomic i32 0, i32* [[P1]] unordered, align 4
+; CHECK-NEXT:    store atomic i32 3, ptr [[P1:%.*]] release, align 4
+; CHECK-NEXT:    store atomic i32 0, ptr [[P1]] unordered, align 4
 ; CHECK-NEXT:    ret void
 ;
-  store atomic i32 3, i32* %P1 release, align 4
-  store atomic i32 0, i32* %P1 unordered, align 4
+  store atomic i32 3, ptr %P1 release, align 4
+  store atomic i32 0, ptr %P1 unordered, align 4
   ret void
 }

diff  --git a/llvm/test/Transforms/EarlyCSE/basic.ll b/llvm/test/Transforms/EarlyCSE/basic.ll
index df4c5c6c13ac3..759a0f4e47568 100644
--- a/llvm/test/Transforms/EarlyCSE/basic.ll
+++ b/llvm/test/Transforms/EarlyCSE/basic.ll
@@ -5,291 +5,291 @@
 
 declare void @llvm.assume(i1) nounwind
 
-define void @test1(i8 %V, i32 *%P) {
+define void @test1(i8 %V, ptr%P) {
 ; CHECK-LABEL: @test1(
-; CHECK-NEXT:    store i32 23, i32* [[P:%.*]], align 4
+; CHECK-NEXT:    store i32 23, ptr [[P:%.*]], align 4
 ; CHECK-NEXT:    [[C:%.*]] = zext i8 [[V:%.*]] to i32
-; CHECK-NEXT:    store volatile i32 [[C]], i32* [[P]], align 4
-; CHECK-NEXT:    store volatile i32 [[C]], i32* [[P]], align 4
+; CHECK-NEXT:    store volatile i32 [[C]], ptr [[P]], align 4
+; CHECK-NEXT:    store volatile i32 [[C]], ptr [[P]], align 4
 ; CHECK-NEXT:    [[E:%.*]] = add i32 [[C]], [[C]]
-; CHECK-NEXT:    store volatile i32 [[E]], i32* [[P]], align 4
-; CHECK-NEXT:    store volatile i32 [[E]], i32* [[P]], align 4
-; CHECK-NEXT:    store volatile i32 [[E]], i32* [[P]], align 4
+; CHECK-NEXT:    store volatile i32 [[E]], ptr [[P]], align 4
+; CHECK-NEXT:    store volatile i32 [[E]], ptr [[P]], align 4
+; CHECK-NEXT:    store volatile i32 [[E]], ptr [[P]], align 4
 ; CHECK-NEXT:    ret void
 ;
   %A = bitcast i64 42 to double  ;; dead
   %B = add i32 4, 19             ;; constant folds
-  store i32 %B, i32* %P
+  store i32 %B, ptr %P
 
   %C = zext i8 %V to i32
   %D = zext i8 %V to i32  ;; CSE
-  store volatile i32 %C, i32* %P
-  store volatile i32 %D, i32* %P
+  store volatile i32 %C, ptr %P
+  store volatile i32 %D, ptr %P
 
   %E = add i32 %C, %C
   %F = add i32 %C, %C
-  store volatile i32 %E, i32* %P
-  store volatile i32 %F, i32* %P
+  store volatile i32 %E, ptr %P
+  store volatile i32 %F, ptr %P
 
   %G = add nuw i32 %C, %C
-  store volatile i32 %G, i32* %P
+  store volatile i32 %G, ptr %P
   ret void
 }
 
 
 ;; Simple load value numbering.
-define i32 @test2(i32 *%P) {
+define i32 @test2(ptr%P) {
 ; CHECK-LABEL: @test2(
-; CHECK-NEXT:    [[V1:%.*]] = load i32, i32* [[P:%.*]], align 4
+; CHECK-NEXT:    [[V1:%.*]] = load i32, ptr [[P:%.*]], align 4
 ; CHECK-NEXT:    ret i32 0
 ;
-  %V1 = load i32, i32* %P
-  %V2 = load i32, i32* %P
+  %V1 = load i32, ptr %P
+  %V2 = load i32, ptr %P
   %Diff = sub i32 %V1, %V2
   ret i32 %Diff
 }
 
-define i32 @test2a(i32 *%P, i1 %b) {
+define i32 @test2a(ptr%P, i1 %b) {
 ; CHECK-LABEL: @test2a(
-; CHECK-NEXT:    [[V1:%.*]] = load i32, i32* [[P:%.*]], align 4
+; CHECK-NEXT:    [[V1:%.*]] = load i32, ptr [[P:%.*]], align 4
 ; CHECK-NEXT:    tail call void @llvm.assume(i1 [[B:%.*]])
 ; CHECK-NEXT:    ret i32 0
 ;
-  %V1 = load i32, i32* %P
+  %V1 = load i32, ptr %P
   tail call void @llvm.assume(i1 %b)
-  %V2 = load i32, i32* %P
+  %V2 = load i32, ptr %P
   %Diff = sub i32 %V1, %V2
   ret i32 %Diff
 }
 
 ;; Cross block load value numbering.
-define i32 @test3(i32 *%P, i1 %Cond) {
+define i32 @test3(ptr%P, i1 %Cond) {
 ; CHECK-LABEL: @test3(
-; CHECK-NEXT:    [[V1:%.*]] = load i32, i32* [[P:%.*]], align 4
+; CHECK-NEXT:    [[V1:%.*]] = load i32, ptr [[P:%.*]], align 4
 ; CHECK-NEXT:    br i1 [[COND:%.*]], label [[T:%.*]], label [[F:%.*]]
 ; CHECK:       T:
-; CHECK-NEXT:    store i32 4, i32* [[P]], align 4
+; CHECK-NEXT:    store i32 4, ptr [[P]], align 4
 ; CHECK-NEXT:    ret i32 42
 ; CHECK:       F:
 ; CHECK-NEXT:    ret i32 0
 ;
-  %V1 = load i32, i32* %P
+  %V1 = load i32, ptr %P
   br i1 %Cond, label %T, label %F
 T:
-  store i32 4, i32* %P
+  store i32 4, ptr %P
   ret i32 42
 F:
-  %V2 = load i32, i32* %P
+  %V2 = load i32, ptr %P
   %Diff = sub i32 %V1, %V2
   ret i32 %Diff
 }
 
-define i32 @test3a(i32 *%P, i1 %Cond, i1 %b) {
+define i32 @test3a(ptr%P, i1 %Cond, i1 %b) {
 ; CHECK-LABEL: @test3a(
-; CHECK-NEXT:    [[V1:%.*]] = load i32, i32* [[P:%.*]], align 4
+; CHECK-NEXT:    [[V1:%.*]] = load i32, ptr [[P:%.*]], align 4
 ; CHECK-NEXT:    br i1 [[COND:%.*]], label [[T:%.*]], label [[F:%.*]]
 ; CHECK:       T:
-; CHECK-NEXT:    store i32 4, i32* [[P]], align 4
+; CHECK-NEXT:    store i32 4, ptr [[P]], align 4
 ; CHECK-NEXT:    ret i32 42
 ; CHECK:       F:
 ; CHECK-NEXT:    tail call void @llvm.assume(i1 [[B:%.*]])
 ; CHECK-NEXT:    ret i32 0
 ;
-  %V1 = load i32, i32* %P
+  %V1 = load i32, ptr %P
   br i1 %Cond, label %T, label %F
 T:
-  store i32 4, i32* %P
+  store i32 4, ptr %P
   ret i32 42
 F:
   tail call void @llvm.assume(i1 %b)
-  %V2 = load i32, i32* %P
+  %V2 = load i32, ptr %P
   %Diff = sub i32 %V1, %V2
   ret i32 %Diff
 }
 
 ;; Cross block load value numbering stops when stores happen.
-define i32 @test4(i32 *%P, i1 %Cond) {
+define i32 @test4(ptr%P, i1 %Cond) {
 ; CHECK-LABEL: @test4(
-; CHECK-NEXT:    [[V1:%.*]] = load i32, i32* [[P:%.*]], align 4
+; CHECK-NEXT:    [[V1:%.*]] = load i32, ptr [[P:%.*]], align 4
 ; CHECK-NEXT:    br i1 [[COND:%.*]], label [[T:%.*]], label [[F:%.*]]
 ; CHECK:       T:
 ; CHECK-NEXT:    ret i32 42
 ; CHECK:       F:
-; CHECK-NEXT:    store i32 42, i32* [[P]], align 4
+; CHECK-NEXT:    store i32 42, ptr [[P]], align 4
 ; CHECK-NEXT:    [[DIFF:%.*]] = sub i32 [[V1]], 42
 ; CHECK-NEXT:    ret i32 [[DIFF]]
 ;
-  %V1 = load i32, i32* %P
+  %V1 = load i32, ptr %P
   br i1 %Cond, label %T, label %F
 T:
   ret i32 42
 F:
   ; Clobbers V1
-  store i32 42, i32* %P
+  store i32 42, ptr %P
 
-  %V2 = load i32, i32* %P
+  %V2 = load i32, ptr %P
   %Diff = sub i32 %V1, %V2
   ret i32 %Diff
 }
 
-declare i32 @func(i32 *%P) readonly
+declare i32 @func(ptr%P) readonly
 
 ;; Simple call CSE'ing.
-define i32 @test5(i32 *%P) {
+define i32 @test5(ptr%P) {
 ; CHECK-LABEL: @test5(
-; CHECK-NEXT:    [[V1:%.*]] = call i32 @func(i32* [[P:%.*]])
+; CHECK-NEXT:    [[V1:%.*]] = call i32 @func(ptr [[P:%.*]])
 ; CHECK-NEXT:    ret i32 0
 ;
-  %V1 = call i32 @func(i32* %P)
-  %V2 = call i32 @func(i32* %P)
+  %V1 = call i32 @func(ptr %P)
+  %V2 = call i32 @func(ptr %P)
   %Diff = sub i32 %V1, %V2
   ret i32 %Diff
 }
 
 ;; Trivial Store->load forwarding
-define i32 @test6(i32 *%P) {
+define i32 @test6(ptr%P) {
 ; CHECK-LABEL: @test6(
-; CHECK-NEXT:    store i32 42, i32* [[P:%.*]], align 4
+; CHECK-NEXT:    store i32 42, ptr [[P:%.*]], align 4
 ; CHECK-NEXT:    ret i32 42
 ;
-  store i32 42, i32* %P
-  %V1 = load i32, i32* %P
+  store i32 42, ptr %P
+  %V1 = load i32, ptr %P
   ret i32 %V1
 }
 
-define i32 @test6a(i32 *%P, i1 %b) {
+define i32 @test6a(ptr%P, i1 %b) {
 ; CHECK-LABEL: @test6a(
-; CHECK-NEXT:    store i32 42, i32* [[P:%.*]], align 4
+; CHECK-NEXT:    store i32 42, ptr [[P:%.*]], align 4
 ; CHECK-NEXT:    tail call void @llvm.assume(i1 [[B:%.*]])
 ; CHECK-NEXT:    ret i32 42
 ;
-  store i32 42, i32* %P
+  store i32 42, ptr %P
   tail call void @llvm.assume(i1 %b)
-  %V1 = load i32, i32* %P
+  %V1 = load i32, ptr %P
   ret i32 %V1
 }
 
 ;; Trivial dead store elimination.
-define void @test7(i32 *%P) {
+define void @test7(ptr%P) {
 ; CHECK-LABEL: @test7(
-; CHECK-NEXT:    store i32 45, i32* [[P:%.*]], align 4
+; CHECK-NEXT:    store i32 45, ptr [[P:%.*]], align 4
 ; CHECK-NEXT:    ret void
 ;
-  store i32 42, i32* %P
-  store i32 45, i32* %P
+  store i32 42, ptr %P
+  store i32 45, ptr %P
   ret void
 }
 
 ;; Readnone functions aren't invalidated by stores.
-define i32 @test8(i32 *%P) {
+define i32 @test8(ptr%P) {
 ; CHECK-LABEL: @test8(
-; CHECK-NEXT:    [[V1:%.*]] = call i32 @func(i32* [[P:%.*]]) #[[ATTR2:[0-9]+]]
-; CHECK-NEXT:    store i32 4, i32* [[P]], align 4
+; CHECK-NEXT:    [[V1:%.*]] = call i32 @func(ptr [[P:%.*]]) #[[ATTR2:[0-9]+]]
+; CHECK-NEXT:    store i32 4, ptr [[P]], align 4
 ; CHECK-NEXT:    ret i32 0
 ;
-  %V1 = call i32 @func(i32* %P) readnone
-  store i32 4, i32* %P
-  %V2 = call i32 @func(i32* %P) readnone
+  %V1 = call i32 @func(ptr %P) readnone
+  store i32 4, ptr %P
+  %V2 = call i32 @func(ptr %P) readnone
   %Diff = sub i32 %V1, %V2
   ret i32 %Diff
 }
 
 ;; Trivial DSE can't be performed across a readonly call.  The call
 ;; can observe the earlier write.
-define i32 @test9(i32 *%P) {
+define i32 @test9(ptr%P) {
 ; CHECK-LABEL: @test9(
-; CHECK-NEXT:    store i32 4, i32* [[P:%.*]], align 4
-; CHECK-NEXT:    [[V1:%.*]] = call i32 @func(i32* [[P]]) #[[ATTR1:[0-9]+]]
-; CHECK-NEXT:    store i32 5, i32* [[P]], align 4
+; CHECK-NEXT:    store i32 4, ptr [[P:%.*]], align 4
+; CHECK-NEXT:    [[V1:%.*]] = call i32 @func(ptr [[P]]) #[[ATTR1:[0-9]+]]
+; CHECK-NEXT:    store i32 5, ptr [[P]], align 4
 ; CHECK-NEXT:    ret i32 [[V1]]
 ;
-  store i32 4, i32* %P
-  %V1 = call i32 @func(i32* %P) readonly
-  store i32 5, i32* %P
+  store i32 4, ptr %P
+  %V1 = call i32 @func(ptr %P) readonly
+  store i32 5, ptr %P
   ret i32 %V1
 }
 
 ;; Trivial DSE can be performed across a readnone call.
-define i32 @test10(i32 *%P) {
+define i32 @test10(ptr%P) {
 ; CHECK-LABEL: @test10(
-; CHECK-NEXT:    [[V1:%.*]] = call i32 @func(i32* [[P:%.*]]) #[[ATTR2]]
-; CHECK-NEXT:    store i32 5, i32* [[P]], align 4
+; CHECK-NEXT:    [[V1:%.*]] = call i32 @func(ptr [[P:%.*]]) #[[ATTR2]]
+; CHECK-NEXT:    store i32 5, ptr [[P]], align 4
 ; CHECK-NEXT:    ret i32 [[V1]]
 ;
-  store i32 4, i32* %P
-  %V1 = call i32 @func(i32* %P) readnone
-  store i32 5, i32* %P
+  store i32 4, ptr %P
+  %V1 = call i32 @func(ptr %P) readnone
+  store i32 5, ptr %P
   ret i32 %V1
 }
 
 ;; Trivial dead store elimination - should work for an entire series of dead stores too.
-define void @test11(i32 *%P) {
+define void @test11(ptr%P) {
 ; CHECK-LABEL: @test11(
-; CHECK-NEXT:    store i32 45, i32* [[P:%.*]], align 4
+; CHECK-NEXT:    store i32 45, ptr [[P:%.*]], align 4
 ; CHECK-NEXT:    ret void
 ;
-  store i32 42, i32* %P
-  store i32 43, i32* %P
-  store i32 44, i32* %P
-  store i32 45, i32* %P
+  store i32 42, ptr %P
+  store i32 43, ptr %P
+  store i32 44, ptr %P
+  store i32 45, ptr %P
   ret void
 }
 
-define i32 @test12(i1 %B, i32* %P1, i32* %P2) {
+define i32 @test12(i1 %B, ptr %P1, ptr %P2) {
 ; CHECK-LABEL: @test12(
-; CHECK-NEXT:    [[LOAD0:%.*]] = load i32, i32* [[P1:%.*]], align 4
-; CHECK-NEXT:    [[TMP1:%.*]] = load atomic i32, i32* [[P2:%.*]] seq_cst, align 4
-; CHECK-NEXT:    [[LOAD1:%.*]] = load i32, i32* [[P1]], align 4
+; CHECK-NEXT:    [[LOAD0:%.*]] = load i32, ptr [[P1:%.*]], align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = load atomic i32, ptr [[P2:%.*]] seq_cst, align 4
+; CHECK-NEXT:    [[LOAD1:%.*]] = load i32, ptr [[P1]], align 4
 ; CHECK-NEXT:    [[SEL:%.*]] = select i1 [[B:%.*]], i32 [[LOAD0]], i32 [[LOAD1]]
 ; CHECK-NEXT:    ret i32 [[SEL]]
 ;
-  %load0 = load i32, i32* %P1
-  %1 = load atomic i32, i32* %P2 seq_cst, align 4
-  %load1 = load i32, i32* %P1
+  %load0 = load i32, ptr %P1
+  %1 = load atomic i32, ptr %P2 seq_cst, align 4
+  %load1 = load i32, ptr %P1
   %sel = select i1 %B, i32 %load0, i32 %load1
   ret i32 %sel
 }
 
-define void @dse1(i32 *%P) {
+define void @dse1(ptr%P) {
 ; CHECK-LABEL: @dse1(
-; CHECK-NEXT:    [[V:%.*]] = load i32, i32* [[P:%.*]], align 4
+; CHECK-NEXT:    [[V:%.*]] = load i32, ptr [[P:%.*]], align 4
 ; CHECK-NEXT:    ret void
 ;
-  %v = load i32, i32* %P
-  store i32 %v, i32* %P
+  %v = load i32, ptr %P
+  store i32 %v, ptr %P
   ret void
 }
 
-define void @dse2(i32 *%P) {
+define void @dse2(ptr%P) {
 ; CHECK-LABEL: @dse2(
-; CHECK-NEXT:    [[V:%.*]] = load atomic i32, i32* [[P:%.*]] seq_cst, align 4
+; CHECK-NEXT:    [[V:%.*]] = load atomic i32, ptr [[P:%.*]] seq_cst, align 4
 ; CHECK-NEXT:    ret void
 ;
-  %v = load atomic i32, i32* %P seq_cst, align 4
-  store i32 %v, i32* %P
+  %v = load atomic i32, ptr %P seq_cst, align 4
+  store i32 %v, ptr %P
   ret void
 }
 
-define void @dse3(i32 *%P) {
+define void @dse3(ptr%P) {
 ; CHECK-LABEL: @dse3(
-; CHECK-NEXT:    [[V:%.*]] = load atomic i32, i32* [[P:%.*]] seq_cst, align 4
+; CHECK-NEXT:    [[V:%.*]] = load atomic i32, ptr [[P:%.*]] seq_cst, align 4
 ; CHECK-NEXT:    ret void
 ;
-  %v = load atomic i32, i32* %P seq_cst, align 4
-  store atomic i32 %v, i32* %P unordered, align 4
+  %v = load atomic i32, ptr %P seq_cst, align 4
+  store atomic i32 %v, ptr %P unordered, align 4
   ret void
 }
 
-define i32 @dse4(i32 *%P, i32 *%Q) {
+define i32 @dse4(ptr%P, ptr%Q) {
 ; CHECK-LABEL: @dse4(
-; CHECK-NEXT:    [[A:%.*]] = load i32, i32* [[Q:%.*]], align 4
-; CHECK-NEXT:    [[V:%.*]] = load atomic i32, i32* [[P:%.*]] unordered, align 4
+; CHECK-NEXT:    [[A:%.*]] = load i32, ptr [[Q:%.*]], align 4
+; CHECK-NEXT:    [[V:%.*]] = load atomic i32, ptr [[P:%.*]] unordered, align 4
 ; CHECK-NEXT:    ret i32 0
 ;
-  %a = load i32, i32* %Q
-  %v = load atomic i32, i32* %P unordered, align 4
-  store atomic i32 %v, i32* %P unordered, align 4
-  %b = load i32, i32* %Q
+  %a = load i32, ptr %Q
+  %v = load atomic i32, ptr %P unordered, align 4
+  store atomic i32 %v, ptr %P unordered, align 4
+  %b = load i32, ptr %Q
   %res = sub i32 %a, %b
   ret i32 %res
 }
@@ -300,41 +300,41 @@ define i32 @dse4(i32 *%P, i32 *%Q) {
 ; The only guarantee we have to provide is that each of the loads
 ; has to observe some value written to that location.  We  do
 ; not have to respect the order in which those writes were done.
-define i32 @dse5(i32 *%P, i32 *%Q) {
+define i32 @dse5(ptr%P, ptr%Q) {
 ; CHECK-LABEL: @dse5(
-; CHECK-NEXT:    [[V:%.*]] = load atomic i32, i32* [[P:%.*]] unordered, align 4
-; CHECK-NEXT:    [[A:%.*]] = load atomic i32, i32* [[Q:%.*]] unordered, align 4
+; CHECK-NEXT:    [[V:%.*]] = load atomic i32, ptr [[P:%.*]] unordered, align 4
+; CHECK-NEXT:    [[A:%.*]] = load atomic i32, ptr [[Q:%.*]] unordered, align 4
 ; CHECK-NEXT:    ret i32 0
 ;
-  %v = load atomic i32, i32* %P unordered, align 4
-  %a = load atomic i32, i32* %Q unordered, align 4
-  store atomic i32 %v, i32* %P unordered, align 4
-  %b = load atomic i32, i32* %Q unordered, align 4
+  %v = load atomic i32, ptr %P unordered, align 4
+  %a = load atomic i32, ptr %Q unordered, align 4
+  store atomic i32 %v, ptr %P unordered, align 4
+  %b = load atomic i32, ptr %Q unordered, align 4
   %res = sub i32 %a, %b
   ret i32 %res
 }
 
 
-define void @dse_neg1(i32 *%P) {
+define void @dse_neg1(ptr%P) {
 ; CHECK-LABEL: @dse_neg1(
-; CHECK-NEXT:    store i32 5, i32* [[P:%.*]], align 4
+; CHECK-NEXT:    store i32 5, ptr [[P:%.*]], align 4
 ; CHECK-NEXT:    ret void
 ;
-  %v = load i32, i32* %P
-  store i32 5, i32* %P
+  %v = load i32, ptr %P
+  store i32 5, ptr %P
   ret void
 }
 
 ; Could remove the store, but only if ordering was somehow
 ; encoded.
-define void @dse_neg2(i32 *%P) {
+define void @dse_neg2(ptr%P) {
 ; CHECK-LABEL: @dse_neg2(
-; CHECK-NEXT:    [[V:%.*]] = load i32, i32* [[P:%.*]], align 4
-; CHECK-NEXT:    store atomic i32 [[V]], i32* [[P]] seq_cst, align 4
+; CHECK-NEXT:    [[V:%.*]] = load i32, ptr [[P:%.*]], align 4
+; CHECK-NEXT:    store atomic i32 [[V]], ptr [[P]] seq_cst, align 4
 ; CHECK-NEXT:    ret void
 ;
-  %v = load i32, i32* %P
-  store atomic i32 %v, i32* %P seq_cst, align 4
+  %v = load i32, ptr %P
+  store atomic i32 %v, ptr %P seq_cst, align 4
   ret void
 }
 
@@ -343,16 +343,16 @@ declare i32 @reads_c(i32 returned)
 define void @pr28763() {
 ; CHECK-LABEL: @pr28763(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    store i32 0, i32* @c, align 4
+; CHECK-NEXT:    store i32 0, ptr @c, align 4
 ; CHECK-NEXT:    [[CALL:%.*]] = call i32 @reads_c(i32 0)
-; CHECK-NEXT:    store i32 2, i32* @c, align 4
+; CHECK-NEXT:    store i32 2, ptr @c, align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  %load = load i32, i32* @c, align 4
-  store i32 0, i32* @c, align 4
+  %load = load i32, ptr @c, align 4
+  store i32 0, ptr @c, align 4
   %call = call i32 @reads_c(i32 0)
-  store i32 2, i32* @c, align 4
+  store i32 2, ptr @c, align 4
   ret void
 }
 

diff  --git a/llvm/test/Transforms/EarlyCSE/commute.ll b/llvm/test/Transforms/EarlyCSE/commute.ll
index de14d6eb9bfb4..f442bf890045e 100644
--- a/llvm/test/Transforms/EarlyCSE/commute.ll
+++ b/llvm/test/Transforms/EarlyCSE/commute.ll
@@ -2,89 +2,89 @@
 ; RUN: opt < %s -S -early-cse -earlycse-debug-hash | FileCheck %s
 ; RUN: opt < %s -S -basic-aa -early-cse-memssa | FileCheck %s
 
-define void @test1(float %A, float %B, float* %PA, float* %PB) {
+define void @test1(float %A, float %B, ptr %PA, ptr %PB) {
 ; CHECK-LABEL: @test1(
 ; CHECK-NEXT:    [[C:%.*]] = fadd float [[A:%.*]], [[B:%.*]]
-; CHECK-NEXT:    store float [[C]], float* [[PA:%.*]], align 4
-; CHECK-NEXT:    store float [[C]], float* [[PB:%.*]], align 4
+; CHECK-NEXT:    store float [[C]], ptr [[PA:%.*]], align 4
+; CHECK-NEXT:    store float [[C]], ptr [[PB:%.*]], align 4
 ; CHECK-NEXT:    ret void
 ;
   %C = fadd float %A, %B
-  store float %C, float* %PA
+  store float %C, ptr %PA
   %D = fadd float %B, %A
-  store float %D, float* %PB
+  store float %D, ptr %PB
   ret void
 }
 
-define void @test2(float %A, float %B, i1* %PA, i1* %PB) {
+define void @test2(float %A, float %B, ptr %PA, ptr %PB) {
 ; CHECK-LABEL: @test2(
 ; CHECK-NEXT:    [[C:%.*]] = fcmp oeq float [[A:%.*]], [[B:%.*]]
-; CHECK-NEXT:    store i1 [[C]], i1* [[PA:%.*]], align 1
-; CHECK-NEXT:    store i1 [[C]], i1* [[PB:%.*]], align 1
+; CHECK-NEXT:    store i1 [[C]], ptr [[PA:%.*]], align 1
+; CHECK-NEXT:    store i1 [[C]], ptr [[PB:%.*]], align 1
 ; CHECK-NEXT:    ret void
 ;
   %C = fcmp oeq float %A, %B
-  store i1 %C, i1* %PA
+  store i1 %C, ptr %PA
   %D = fcmp oeq float %B, %A
-  store i1 %D, i1* %PB
+  store i1 %D, ptr %PB
   ret void
 }
 
-define void @test3(float %A, float %B, i1* %PA, i1* %PB) {
+define void @test3(float %A, float %B, ptr %PA, ptr %PB) {
 ; CHECK-LABEL: @test3(
 ; CHECK-NEXT:    [[C:%.*]] = fcmp uge float [[A:%.*]], [[B:%.*]]
-; CHECK-NEXT:    store i1 [[C]], i1* [[PA:%.*]], align 1
-; CHECK-NEXT:    store i1 [[C]], i1* [[PB:%.*]], align 1
+; CHECK-NEXT:    store i1 [[C]], ptr [[PA:%.*]], align 1
+; CHECK-NEXT:    store i1 [[C]], ptr [[PB:%.*]], align 1
 ; CHECK-NEXT:    ret void
 ;
   %C = fcmp uge float %A, %B
-  store i1 %C, i1* %PA
+  store i1 %C, ptr %PA
   %D = fcmp ule float %B, %A
-  store i1 %D, i1* %PB
+  store i1 %D, ptr %PB
   ret void
 }
 
-define void @test4(i32 %A, i32 %B, i1* %PA, i1* %PB) {
+define void @test4(i32 %A, i32 %B, ptr %PA, ptr %PB) {
 ; CHECK-LABEL: @test4(
 ; CHECK-NEXT:    [[C:%.*]] = icmp eq i32 [[A:%.*]], [[B:%.*]]
-; CHECK-NEXT:    store i1 [[C]], i1* [[PA:%.*]], align 1
-; CHECK-NEXT:    store i1 [[C]], i1* [[PB:%.*]], align 1
+; CHECK-NEXT:    store i1 [[C]], ptr [[PA:%.*]], align 1
+; CHECK-NEXT:    store i1 [[C]], ptr [[PB:%.*]], align 1
 ; CHECK-NEXT:    ret void
 ;
   %C = icmp eq i32 %A, %B
-  store i1 %C, i1* %PA
+  store i1 %C, ptr %PA
   %D = icmp eq i32 %B, %A
-  store i1 %D, i1* %PB
+  store i1 %D, ptr %PB
   ret void
 }
 
-define void @test5(i32 %A, i32 %B, i1* %PA, i1* %PB) {
+define void @test5(i32 %A, i32 %B, ptr %PA, ptr %PB) {
 ; CHECK-LABEL: @test5(
 ; CHECK-NEXT:    [[C:%.*]] = icmp sgt i32 [[A:%.*]], [[B:%.*]]
-; CHECK-NEXT:    store i1 [[C]], i1* [[PA:%.*]], align 1
-; CHECK-NEXT:    store i1 [[C]], i1* [[PB:%.*]], align 1
+; CHECK-NEXT:    store i1 [[C]], ptr [[PA:%.*]], align 1
+; CHECK-NEXT:    store i1 [[C]], ptr [[PB:%.*]], align 1
 ; CHECK-NEXT:    ret void
 ;
   %C = icmp sgt i32 %A, %B
-  store i1 %C, i1* %PA
+  store i1 %C, ptr %PA
   %D = icmp slt i32 %B, %A
-  store i1 %D, i1* %PB
+  store i1 %D, ptr %PB
   ret void
 }
 
 ; Test degenerate case of commuted compare of identical comparands.
 
-define void @test6(float %f, i1* %p1, i1* %p2) {
+define void @test6(float %f, ptr %p1, ptr %p2) {
 ; CHECK-LABEL: @test6(
 ; CHECK-NEXT:    [[C1:%.*]] = fcmp ult float [[F:%.*]], [[F]]
-; CHECK-NEXT:    store i1 [[C1]], i1* [[P1:%.*]], align 1
-; CHECK-NEXT:    store i1 [[C1]], i1* [[P2:%.*]], align 1
+; CHECK-NEXT:    store i1 [[C1]], ptr [[P1:%.*]], align 1
+; CHECK-NEXT:    store i1 [[C1]], ptr [[P2:%.*]], align 1
 ; CHECK-NEXT:    ret void
 ;
   %c1 = fcmp ult float %f, %f
   %c2 = fcmp ugt float %f, %f
-  store i1 %c1, i1* %p1
-  store i1 %c2, i1* %p2
+  store i1 %c1, ptr %p1
+  store i1 %c2, ptr %p2
   ret void
 }
 
@@ -746,29 +746,29 @@ define i32 @inverted_max(i32 %i) {
 ; values, and we run this test with -earlycse-debug-hash which would catch
 ; the disagreement and fail if it regressed.  This test also includes a
 ; negation of each negation to check for the same issue one level deeper.
-define void @not_not_min(i32* %px, i32* %py, i32* %pout) {
+define void @not_not_min(ptr %px, ptr %py, ptr %pout) {
 ; CHECK-LABEL: @not_not_min(
-; CHECK-NEXT:    [[X:%.*]] = load volatile i32, i32* [[PX:%.*]], align 4
-; CHECK-NEXT:    [[Y:%.*]] = load volatile i32, i32* [[PY:%.*]], align 4
+; CHECK-NEXT:    [[X:%.*]] = load volatile i32, ptr [[PX:%.*]], align 4
+; CHECK-NEXT:    [[Y:%.*]] = load volatile i32, ptr [[PY:%.*]], align 4
 ; CHECK-NEXT:    [[CMPA:%.*]] = icmp slt i32 [[X]], [[Y]]
 ; CHECK-NEXT:    [[CMPB:%.*]] = xor i1 [[CMPA]], true
 ; CHECK-NEXT:    [[RA:%.*]] = select i1 [[CMPA]], i32 [[X]], i32 [[Y]]
-; CHECK-NEXT:    store volatile i32 [[RA]], i32* [[POUT:%.*]], align 4
-; CHECK-NEXT:    store volatile i32 [[RA]], i32* [[POUT]], align 4
-; CHECK-NEXT:    store volatile i32 [[RA]], i32* [[POUT]], align 4
+; CHECK-NEXT:    store volatile i32 [[RA]], ptr [[POUT:%.*]], align 4
+; CHECK-NEXT:    store volatile i32 [[RA]], ptr [[POUT]], align 4
+; CHECK-NEXT:    store volatile i32 [[RA]], ptr [[POUT]], align 4
 ; CHECK-NEXT:    ret void
 ;
-  %x = load volatile i32, i32* %px
-  %y = load volatile i32, i32* %py
+  %x = load volatile i32, ptr %px
+  %y = load volatile i32, ptr %py
   %cmpa = icmp slt i32 %x, %y
   %cmpb = xor i1 %cmpa, -1
   %cmpc = xor i1 %cmpb, -1
   %ra = select i1 %cmpa, i32 %x, i32 %y
   %rb = select i1 %cmpb, i32 %y, i32 %x
   %rc = select i1 %cmpc, i32 %x, i32 %y
-  store volatile i32 %ra, i32* %pout
-  store volatile i32 %rb, i32* %pout
-  store volatile i32 %rc, i32* %pout
+  store volatile i32 %ra, ptr %pout
+  store volatile i32 %rb, ptr %pout
+  store volatile i32 %rc, ptr %pout
 
   ret void
 }

diff  --git a/llvm/test/Transforms/EarlyCSE/conditional.ll b/llvm/test/Transforms/EarlyCSE/conditional.ll
index c4b3277633fe0..aeb024e265e3f 100644
--- a/llvm/test/Transforms/EarlyCSE/conditional.ll
+++ b/llvm/test/Transforms/EarlyCSE/conditional.ll
@@ -2,30 +2,30 @@
 ; RUN: opt -basic-aa -early-cse-memssa -S < %s | FileCheck %s
 
 ; Can we CSE a known condition to a constant?
-define i1 @test(i8* %p) {
+define i1 @test(ptr %p) {
 ; CHECK-LABEL: @test
 entry:
-  %cnd1 = icmp eq i8* %p, null
+  %cnd1 = icmp eq ptr %p, null
   br i1 %cnd1, label %taken, label %untaken
 
 taken:
 ; CHECK-LABEL: taken:
 ; CHECK-NEXT: ret i1 true
-  %cnd2 = icmp eq i8* %p, null
+  %cnd2 = icmp eq ptr %p, null
   ret i1 %cnd2
 
 untaken:
 ; CHECK-LABEL: untaken:
 ; CHECK-NEXT: ret i1 false
-  %cnd3 = icmp eq i8* %p, null
+  %cnd3 = icmp eq ptr %p, null
   ret i1 %cnd3
 }
 
 ; We can CSE the condition, but we *don't* know its value after the merge
-define i1 @test_neg1(i8* %p) {
+define i1 @test_neg1(ptr %p) {
 ; CHECK-LABEL: @test_neg1
 entry:
-  %cnd1 = icmp eq i8* %p, null
+  %cnd1 = icmp eq ptr %p, null
   br i1 %cnd1, label %taken, label %untaken
 
 taken:
@@ -37,30 +37,30 @@ untaken:
 merge:
 ; CHECK-LABEL: merge:
 ; CHECK-NEXT: ret i1 %cnd1
-  %cnd3 = icmp eq i8* %p, null
+  %cnd3 = icmp eq ptr %p, null
   ret i1 %cnd3
 }
 
 ; Check specifically for a case where we have a unique predecessor, but
 ; not a single predecessor.  We can not know the value of the condition here.
-define i1 @test_neg2(i8* %p) {
+define i1 @test_neg2(ptr %p) {
 ; CHECK-LABEL: @test_neg2
 entry:
-  %cnd1 = icmp eq i8* %p, null
+  %cnd1 = icmp eq ptr %p, null
   br i1 %cnd1, label %merge, label %merge
 
 merge:
 ; CHECK-LABEL: merge:
 ; CHECK-NEXT: ret i1 %cnd1
-  %cnd3 = icmp eq i8* %p, null
+  %cnd3 = icmp eq ptr %p, null
   ret i1 %cnd3
 }
 
 ; Replace a use rather than CSE
-define i1 @test2(i8* %p) {
+define i1 @test2(ptr %p) {
 ; CHECK-LABEL: @test2
 entry:
-  %cnd = icmp eq i8* %p, null
+  %cnd = icmp eq ptr %p, null
   br i1 %cnd, label %taken, label %untaken
 
 taken:
@@ -75,10 +75,10 @@ untaken:
 }
 
 ; Not legal to replace the use given it's not dominated by the edge
-define i1 @test2_neg1(i8* %p) {
+define i1 @test2_neg1(ptr %p) {
 ; CHECK-LABEL: @test2_neg1
 entry:
-  %cnd1 = icmp eq i8* %p, null
+  %cnd1 = icmp eq ptr %p, null
   br i1 %cnd1, label %taken, label %untaken
 
 taken:
@@ -94,10 +94,10 @@ merge:
 }
 
 ; Another single predecessor test, but for dominated use
-define i1 @test2_neg2(i8* %p) {
+define i1 @test2_neg2(ptr %p) {
 ; CHECK-LABEL: @test2_neg2
 entry:
-  %cnd1 = icmp eq i8* %p, null
+  %cnd1 = icmp eq ptr %p, null
   br i1 %cnd1, label %merge, label %merge
 
 merge:

diff  --git a/llvm/test/Transforms/EarlyCSE/const-speculation.ll b/llvm/test/Transforms/EarlyCSE/const-speculation.ll
index bf4469ca37331..65e005de0569f 100644
--- a/llvm/test/Transforms/EarlyCSE/const-speculation.ll
+++ b/llvm/test/Transforms/EarlyCSE/const-speculation.ll
@@ -22,8 +22,8 @@ define i1 @test_constant_speculation() {
 ; CHECK:       select:
 ; CHECK-NEXT:    br label [[END]]
 ; CHECK:       end:
-; CHECK-NEXT:    [[TMP:%.*]] = phi i32* [ null, [[ENTRY:%.*]] ], [ getelementptr inbounds ([[MYSTRUCT:%.*]], %mystruct* @var, i64 0, i32 0), [[SELECT]] ]
-; CHECK-NEXT:    [[RES:%.*]] = icmp eq i32* [[TMP]], null
+; CHECK-NEXT:    [[TMP:%.*]] = phi ptr [ null, [[ENTRY:%.*]] ], [ @var, [[SELECT]] ]
+; CHECK-NEXT:    [[RES:%.*]] = icmp eq ptr [[TMP]], null
 ; CHECK-NEXT:    ret i1 [[RES]]
 ;
 entry:
@@ -32,12 +32,11 @@ entry:
 select:
 
   %tst = icmp eq i32 1, 0
-  %elt = getelementptr %mystruct, %mystruct* @var, i64 0, i32 0
-  %sel = select i1 %tst, i32* null, i32* %elt
+  %sel = select i1 %tst, ptr null, ptr @var
   br label %end
 
 end:
-  %tmp = phi i32* [null, %entry], [%sel, %select]
-  %res = icmp eq i32* %tmp, null
+  %tmp = phi ptr [null, %entry], [%sel, %select]
+  %res = icmp eq ptr %tmp, null
   ret i1 %res
 }

diff  --git a/llvm/test/Transforms/EarlyCSE/debug-info-undef.ll b/llvm/test/Transforms/EarlyCSE/debug-info-undef.ll
index 2d6c5380394fb..0665f73f27215 100644
--- a/llvm/test/Transforms/EarlyCSE/debug-info-undef.ll
+++ b/llvm/test/Transforms/EarlyCSE/debug-info-undef.ll
@@ -7,7 +7,7 @@ target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
 define signext i16 @b() !dbg !12 {
 entry:
   call void @llvm.dbg.value(metadata i16 23680, metadata !17, metadata !DIExpression()), !dbg !18
-  %0 = load i8, i8* @a, align 1, !dbg !19, !tbaa !20
+  %0 = load i8, ptr @a, align 1, !dbg !19, !tbaa !20
   %conv = sext i8 %0 to i16, !dbg !19
 
 ; CHECK: call void @llvm.dbg.value(metadata i8 %0, metadata !17, metadata !DIExpression(DW_OP_LLVM_convert, 8, DW_ATE_signed, DW_OP_LLVM_convert, 16, DW_ATE_signed, DW_OP_stack_value)), !dbg !18
@@ -15,7 +15,7 @@ entry:
 
   call void @llvm.dbg.value(metadata i16 %conv, metadata !17, metadata !DIExpression()), !dbg !18
   %call = call i32 (...) @optimize_me_not(), !dbg !23
-  %1 = load i8, i8* @a, align 1, !dbg !24, !tbaa !20
+  %1 = load i8, ptr @a, align 1, !dbg !24, !tbaa !20
   %conv1 = sext i8 %1 to i16, !dbg !24
   ret i16 %conv1, !dbg !25
 }

diff  --git a/llvm/test/Transforms/EarlyCSE/debuginfo-dce.ll b/llvm/test/Transforms/EarlyCSE/debuginfo-dce.ll
index 20a9805302742..7bc5c06bf5058 100644
--- a/llvm/test/Transforms/EarlyCSE/debuginfo-dce.ll
+++ b/llvm/test/Transforms/EarlyCSE/debuginfo-dce.ll
@@ -5,21 +5,21 @@ target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
 define i32 @foo() !dbg !6 {
 entry:
   %0 = call i64 @llvm.ctpop.i64(i64 0), !dbg !14
-  %1 = inttoptr i64 %0 to i32*, !dbg !14
-  call void @llvm.dbg.value(metadata i32* %1, i64 0, metadata !11, metadata !13), !dbg !14
+  %1 = inttoptr i64 %0 to ptr, !dbg !14
+  call void @llvm.dbg.value(metadata ptr %1, i64 0, metadata !11, metadata !13), !dbg !14
 ; CHECK: call void @llvm.dbg.value(metadata i64 0, metadata !11, metadata !DIExpression()), !dbg !13
-  %call = call i32* (...) @baa(), !dbg !15
-  %2 = ptrtoint i32* %call to i64, !dbg !16
-  %3 = inttoptr i64 %2 to i32*, !dbg !16
-  call void @llvm.dbg.value(metadata i32* %3, i64 0, metadata !11, metadata !13), !dbg !14
-  %tobool = icmp ne i32* %3, null, !dbg !17
+  %call = call ptr (...) @baa(), !dbg !15
+  %2 = ptrtoint ptr %call to i64, !dbg !16
+  %3 = inttoptr i64 %2 to ptr, !dbg !16
+  call void @llvm.dbg.value(metadata ptr %3, i64 0, metadata !11, metadata !13), !dbg !14
+  %tobool = icmp ne ptr %3, null, !dbg !17
   br i1 %tobool, label %if.end, label %if.then, !dbg !19
 
 if.then:                                          ; preds = %entry
   br label %cleanup, !dbg !20
 
 if.end:                                           ; preds = %entry
-  %4 = ptrtoint i32* %3 to i32, !dbg !21
+  %4 = ptrtoint ptr %3 to i32, !dbg !21
   br label %cleanup, !dbg !22
 
 cleanup:                                          ; preds = %if.end, %if.then
@@ -27,7 +27,7 @@ cleanup:                                          ; preds = %if.end, %if.then
   ret i32 %retval.0, !dbg !22
 }
 
-declare i32* @baa(...)
+declare ptr @baa(...)
 
 ; Function Attrs: nounwind readnone
 declare i64 @llvm.ctpop.i64(i64)

diff  --git a/llvm/test/Transforms/EarlyCSE/edge.ll b/llvm/test/Transforms/EarlyCSE/edge.ll
index bd82502c22290..a790d9035a5f2 100644
--- a/llvm/test/Transforms/EarlyCSE/edge.ll
+++ b/llvm/test/Transforms/EarlyCSE/edge.ll
@@ -49,15 +49,15 @@ bb2:
 }
 
 declare void @g(i1)
-define void @f4(i8 * %x)  {
+define void @f4(ptr %x)  {
 ; CHECK-LABEL: define void @f4(
 bb0:
-  %y = icmp eq i8* null, %x
+  %y = icmp eq ptr null, %x
   br i1 %y, label %bb2, label %bb1
 bb1:
   br label %bb2
 bb2:
-  %zed = icmp eq i8* null, %x
+  %zed = icmp eq ptr null, %x
   call void @g(i1 %zed)
 ; CHECK: call void @g(i1 %y)
   ret void

diff  --git a/llvm/test/Transforms/EarlyCSE/fence.ll b/llvm/test/Transforms/EarlyCSE/fence.ll
index 8fb50849ff93a..5ac8cc6c20678 100644
--- a/llvm/test/Transforms/EarlyCSE/fence.ll
+++ b/llvm/test/Transforms/EarlyCSE/fence.ll
@@ -9,28 +9,28 @@
 
 ; We can value forward across the fence since we can (semantically) 
 ; reorder the following load before the fence.
-define i32 @test(i32* %addr.i) {
+define i32 @test(ptr %addr.i) {
 ; CHECK-LABEL: @test
 ; CHECK: store
 ; CHECK: fence
 ; CHECK-NOT: load
 ; CHECK: ret
-  store i32 5, i32* %addr.i, align 4
+  store i32 5, ptr %addr.i, align 4
   fence release
-  %a = load i32, i32* %addr.i, align 4
+  %a = load i32, ptr %addr.i, align 4
   ret i32 %a
 }
 
 ; Same as above
-define i32 @test2(i32* noalias %addr.i, i32* noalias %otheraddr) {
+define i32 @test2(ptr noalias %addr.i, ptr noalias %otheraddr) {
 ; CHECK-LABEL: @test2
 ; CHECK: load
 ; CHECK: fence
 ; CHECK-NOT: load
 ; CHECK: ret
-  %a = load i32, i32* %addr.i, align 4
+  %a = load i32, ptr %addr.i, align 4
   fence release
-  %a2 = load i32, i32* %addr.i, align 4
+  %a2 = load i32, ptr %addr.i, align 4
   %res = sub i32 %a, %a2
   ret i32 %a
 }
@@ -42,16 +42,16 @@ define i32 @test2(i32* noalias %addr.i, i32* noalias %otheraddr) {
 ; fence.  Note that it would be legal to reorder '%a' after the fence
 ; and then remove '%a2'.  The current implementation doesn't know how
 ; to do this, but if it learned, this test will need to be revised.
-define i32 @test3(i32* noalias %addr.i, i32* noalias %otheraddr) {
+define i32 @test3(ptr noalias %addr.i, ptr noalias %otheraddr) {
 ; CHECK-LABEL: @test3
 ; CHECK: load
 ; CHECK: fence
 ; CHECK: load
 ; CHECK: sub
 ; CHECK: ret
-  %a = load i32, i32* %addr.i, align 4
+  %a = load i32, ptr %addr.i, align 4
   fence acquire
-  %a2 = load i32, i32* %addr.i, align 4
+  %a2 = load i32, ptr %addr.i, align 4
   %res = sub i32 %a, %a2
   ret i32 %res
 }
@@ -60,28 +60,28 @@ define i32 @test3(i32* noalias %addr.i, i32* noalias %otheraddr) {
 ; principle reorder the second store above the fence and then DSE either
 ; store, but this is beyond the simple last-store DSE which EarlyCSE
 ; implements.
-define void @test4(i32* %addr.i) {
+define void @test4(ptr %addr.i) {
 ; CHECK-LABEL: @test4
 ; CHECK: store
 ; CHECK: fence
 ; CHECK: store
 ; CHECK: ret
-  store i32 5, i32* %addr.i, align 4
+  store i32 5, ptr %addr.i, align 4
   fence release
-  store i32 5, i32* %addr.i, align 4
+  store i32 5, ptr %addr.i, align 4
   ret void
 }
 
 ; We *could* DSE across this fence, but don't.  No other thread can
 ; observe the order of the acquire fence and the store.
-define void @test5(i32* %addr.i) {
+define void @test5(ptr %addr.i) {
 ; CHECK-LABEL: @test5
 ; CHECK: store
 ; CHECK: fence
 ; CHECK: store
 ; CHECK: ret
-  store i32 5, i32* %addr.i, align 4
+  store i32 5, ptr %addr.i, align 4
   fence acquire
-  store i32 5, i32* %addr.i, align 4
+  store i32 5, ptr %addr.i, align 4
   ret void
 }

diff  --git a/llvm/test/Transforms/EarlyCSE/flags.ll b/llvm/test/Transforms/EarlyCSE/flags.ll
index 487ef5ef1a117..1e5cd28ce4a2e 100644
--- a/llvm/test/Transforms/EarlyCSE/flags.ll
+++ b/llvm/test/Transforms/EarlyCSE/flags.ll
@@ -18,32 +18,32 @@ define void @test1(float %x, float %y) {
   ret void
 }
 
-declare void @use.i8(i8*)
+declare void @use.i8(ptr)
 
-define void @test_inbounds_program_ub_if_first_gep_poison(i8* %ptr, i64 %n) {
+define void @test_inbounds_program_ub_if_first_gep_poison(ptr %ptr, i64 %n) {
 ; CHECK-LABEL: @test_inbounds_program_ub_if_first_gep_poison(
-; CHECK-NEXT:    [[ADD_PTR_1:%.*]] = getelementptr inbounds i8, i8* [[PTR:%.*]], i64 [[N:%.*]]
-; CHECK-NEXT:    call void @use.i8(i8* noundef [[ADD_PTR_1]])
-; CHECK-NEXT:    call void @use.i8(i8* [[ADD_PTR_1]])
+; CHECK-NEXT:    [[ADD_PTR_1:%.*]] = getelementptr inbounds i8, ptr [[PTR:%.*]], i64 [[N:%.*]]
+; CHECK-NEXT:    call void @use.i8(ptr noundef [[ADD_PTR_1]])
+; CHECK-NEXT:    call void @use.i8(ptr [[ADD_PTR_1]])
 ; CHECK-NEXT:    ret void
 ;
-  %add.ptr.1 = getelementptr inbounds i8, i8* %ptr, i64 %n
-  call void @use.i8(i8* noundef %add.ptr.1)
-  %add.ptr.2 = getelementptr i8, i8* %ptr, i64 %n
-  call void @use.i8(i8* %add.ptr.2)
+  %add.ptr.1 = getelementptr inbounds i8, ptr %ptr, i64 %n
+  call void @use.i8(ptr noundef %add.ptr.1)
+  %add.ptr.2 = getelementptr i8, ptr %ptr, i64 %n
+  call void @use.i8(ptr %add.ptr.2)
   ret void
 }
 
-define void @test_inbounds_program_not_ub_if_first_gep_poison(i8* %ptr, i64 %n) {
+define void @test_inbounds_program_not_ub_if_first_gep_poison(ptr %ptr, i64 %n) {
 ; CHECK-LABEL: @test_inbounds_program_not_ub_if_first_gep_poison(
-; CHECK-NEXT:    [[ADD_PTR_1:%.*]] = getelementptr i8, i8* [[PTR:%.*]], i64 [[N:%.*]]
-; CHECK-NEXT:    call void @use.i8(i8* [[ADD_PTR_1]])
-; CHECK-NEXT:    call void @use.i8(i8* [[ADD_PTR_1]])
+; CHECK-NEXT:    [[ADD_PTR_1:%.*]] = getelementptr i8, ptr [[PTR:%.*]], i64 [[N:%.*]]
+; CHECK-NEXT:    call void @use.i8(ptr [[ADD_PTR_1]])
+; CHECK-NEXT:    call void @use.i8(ptr [[ADD_PTR_1]])
 ; CHECK-NEXT:    ret void
 ;
-  %add.ptr.1 = getelementptr inbounds i8, i8* %ptr, i64 %n
-  call void @use.i8(i8* %add.ptr.1)
-  %add.ptr.2 = getelementptr i8, i8* %ptr, i64 %n
-  call void @use.i8(i8* %add.ptr.2)
+  %add.ptr.1 = getelementptr inbounds i8, ptr %ptr, i64 %n
+  call void @use.i8(ptr %add.ptr.1)
+  %add.ptr.2 = getelementptr i8, ptr %ptr, i64 %n
+  call void @use.i8(ptr %add.ptr.2)
   ret void
 }

diff  --git a/llvm/test/Transforms/EarlyCSE/floatingpoint.ll b/llvm/test/Transforms/EarlyCSE/floatingpoint.ll
index c7579adfdd3cd..c4a0bf92f4182 100644
--- a/llvm/test/Transforms/EarlyCSE/floatingpoint.ll
+++ b/llvm/test/Transforms/EarlyCSE/floatingpoint.ll
@@ -21,16 +21,16 @@ define <4 x float> @fW( <4 x float> %a) {
 }
 
 ; CSE unary fnegs.
-define void @fX(<4 x float> *%p, <4 x float> %a) {
+define void @fX(ptr%p, <4 x float> %a) {
 ; CHECK-LABEL: @fX(
 ; CHECK-NEXT:    [[X:%.*]] = fneg <4 x float> [[A:%.*]]
-; CHECK-NEXT:    store volatile <4 x float> [[X]], <4 x float>* [[P:%.*]], align 16
-; CHECK-NEXT:    store volatile <4 x float> [[X]], <4 x float>* [[P]], align 16
+; CHECK-NEXT:    store volatile <4 x float> [[X]], ptr [[P:%.*]], align 16
+; CHECK-NEXT:    store volatile <4 x float> [[X]], ptr [[P]], align 16
 ; CHECK-NEXT:    ret void
 ;
   %x = fneg <4 x float> %a
   %y = fneg <4 x float> %a
-  store volatile <4 x float> %x, <4 x float>* %p
-  store volatile <4 x float> %y, <4 x float>* %p
+  store volatile <4 x float> %x, ptr %p
+  store volatile <4 x float> %y, ptr %p
   ret void
 }

diff  --git a/llvm/test/Transforms/EarlyCSE/gc_relocate.ll b/llvm/test/Transforms/EarlyCSE/gc_relocate.ll
index e5097dc61aa2a..beeebe500f3fd 100644
--- a/llvm/test/Transforms/EarlyCSE/gc_relocate.ll
+++ b/llvm/test/Transforms/EarlyCSE/gc_relocate.ll
@@ -4,155 +4,155 @@
 declare void @func()
 declare i32 @"personality_function"()
 
-define i1 @test_trivial(i32 addrspace(1)* %in) gc "statepoint-example" {
+define i1 @test_trivial(ptr addrspace(1) %in) gc "statepoint-example" {
 ; CHECK-LABEL: @test_trivial(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[SAFEPOINT_TOKEN:%.*]] = call token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) [ "gc-live"(i32 addrspace(1)* [[IN:%.*]]) ]
-; CHECK-NEXT:    [[A:%.*]] = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token [[SAFEPOINT_TOKEN]], i32 0, i32 0)
-; CHECK-NEXT:    [[CMP1:%.*]] = icmp eq i32 addrspace(1)* [[A]], null
+; CHECK-NEXT:    [[SAFEPOINT_TOKEN:%.*]] = call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) [ "gc-live"(ptr addrspace(1) [[IN:%.*]]) ]
+; CHECK-NEXT:    [[A:%.*]] = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token [[SAFEPOINT_TOKEN]], i32 0, i32 0)
+; CHECK-NEXT:    [[CMP1:%.*]] = icmp eq ptr addrspace(1) [[A]], null
 ; CHECK-NEXT:    ret i1 [[CMP1]]
 ;
 entry:
-  %safepoint_token = call token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) ["gc-live"(i32 addrspace(1)* %in)]
-  %a = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %safepoint_token,  i32 0, i32 0)
-  %b = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %safepoint_token,  i32 0, i32 0)
-  %cmp1 = icmp eq i32 addrspace(1)* %a, null
-  %cmp2 = icmp eq i32 addrspace(1)* %b, null
+  %safepoint_token = call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) ["gc-live"(ptr addrspace(1) %in)]
+  %a = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %safepoint_token,  i32 0, i32 0)
+  %b = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %safepoint_token,  i32 0, i32 0)
+  %cmp1 = icmp eq ptr addrspace(1) %a, null
+  %cmp2 = icmp eq ptr addrspace(1) %b, null
   %cmp = and i1 %cmp1, %cmp2
   ret i1 %cmp
 }
 
 @G = external global i32
 
-define i1 @test_readnone(i32 addrspace(1)* %in) gc "statepoint-example" {
+define i1 @test_readnone(ptr addrspace(1) %in) gc "statepoint-example" {
 ; CHECK-LABEL: @test_readnone(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[SAFEPOINT_TOKEN:%.*]] = call token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) [ "gc-live"(i32 addrspace(1)* [[IN:%.*]]) ]
-; CHECK-NEXT:    [[A:%.*]] = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token [[SAFEPOINT_TOKEN]], i32 0, i32 0)
-; CHECK-NEXT:    store i32 0, i32* @G, align 4
-; CHECK-NEXT:    [[CMP1:%.*]] = icmp eq i32 addrspace(1)* [[A]], null
+; CHECK-NEXT:    [[SAFEPOINT_TOKEN:%.*]] = call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) [ "gc-live"(ptr addrspace(1) [[IN:%.*]]) ]
+; CHECK-NEXT:    [[A:%.*]] = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token [[SAFEPOINT_TOKEN]], i32 0, i32 0)
+; CHECK-NEXT:    store i32 0, ptr @G, align 4
+; CHECK-NEXT:    [[CMP1:%.*]] = icmp eq ptr addrspace(1) [[A]], null
 ; CHECK-NEXT:    ret i1 [[CMP1]]
 ;
 entry:
-  %safepoint_token = call token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) ["gc-live"(i32 addrspace(1)* %in)]
-  %a = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %safepoint_token,  i32 0, i32 0)
-  store i32 0, i32* @G
-  %b = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %safepoint_token,  i32 0, i32 0)
-  %cmp1 = icmp eq i32 addrspace(1)* %a, null
-  %cmp2 = icmp eq i32 addrspace(1)* %b, null
+  %safepoint_token = call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) ["gc-live"(ptr addrspace(1) %in)]
+  %a = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %safepoint_token,  i32 0, i32 0)
+  store i32 0, ptr @G
+  %b = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %safepoint_token,  i32 0, i32 0)
+  %cmp1 = icmp eq ptr addrspace(1) %a, null
+  %cmp2 = icmp eq ptr addrspace(1) %b, null
   %cmp = and i1 %cmp1, %cmp2
   ret i1 %cmp
 }
 
-define i1 @test_call(i32 addrspace(1)* %in) gc "statepoint-example" {
+define i1 @test_call(ptr addrspace(1) %in) gc "statepoint-example" {
 ; CHECK-LABEL: @test_call(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[SAFEPOINT_TOKEN:%.*]] = call token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) [ "gc-live"(i32 addrspace(1)* [[IN:%.*]], i32 addrspace(1)* [[IN]]) ]
-; CHECK-NEXT:    [[BASE:%.*]] = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token [[SAFEPOINT_TOKEN]], i32 0, i32 0)
-; CHECK-NEXT:    [[SAFEPOINT_TOKEN2:%.*]] = call token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) [ "gc-live"(i32 addrspace(1)* [[BASE]], i32 addrspace(1)* [[BASE]]) ]
+; CHECK-NEXT:    [[SAFEPOINT_TOKEN:%.*]] = call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) [ "gc-live"(ptr addrspace(1) [[IN:%.*]], ptr addrspace(1) [[IN]]) ]
+; CHECK-NEXT:    [[BASE:%.*]] = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token [[SAFEPOINT_TOKEN]], i32 0, i32 0)
+; CHECK-NEXT:    [[SAFEPOINT_TOKEN2:%.*]] = call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) [ "gc-live"(ptr addrspace(1) [[BASE]], ptr addrspace(1) [[BASE]]) ]
 ; CHECK-NEXT:    br label [[NEXT:%.*]]
 ; CHECK:       next:
-; CHECK-NEXT:    [[BASE_RELOC:%.*]] = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token [[SAFEPOINT_TOKEN2]], i32 0, i32 0)
-; CHECK-NEXT:    [[CMP1:%.*]] = icmp eq i32 addrspace(1)* [[BASE_RELOC]], null
+; CHECK-NEXT:    [[BASE_RELOC:%.*]] = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token [[SAFEPOINT_TOKEN2]], i32 0, i32 0)
+; CHECK-NEXT:    [[CMP1:%.*]] = icmp eq ptr addrspace(1) [[BASE_RELOC]], null
 ; CHECK-NEXT:    ret i1 [[CMP1]]
 ;
 entry:
-  %safepoint_token = call token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) ["gc-live"(i32 addrspace(1)* %in, i32 addrspace(1)* %in)]
-  %base = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %safepoint_token,  i32 0, i32 0)
-  %derived = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %safepoint_token,  i32 0, i32 1)
-  %safepoint_token2 = call token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) ["gc-live"(i32 addrspace(1)* %base, i32 addrspace(1)* %derived)]
+  %safepoint_token = call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) ["gc-live"(ptr addrspace(1) %in, ptr addrspace(1) %in)]
+  %base = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %safepoint_token,  i32 0, i32 0)
+  %derived = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %safepoint_token,  i32 0, i32 1)
+  %safepoint_token2 = call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) ["gc-live"(ptr addrspace(1) %base, ptr addrspace(1) %derived)]
   br label %next
 
 next:
-  %base_reloc = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %safepoint_token2,  i32 0, i32 0)
-  %derived_reloc = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %safepoint_token2,  i32 0, i32 1)
-  %cmp1 = icmp eq i32 addrspace(1)* %base_reloc, null
-  %cmp2 = icmp eq i32 addrspace(1)* %derived_reloc, null
+  %base_reloc = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %safepoint_token2,  i32 0, i32 0)
+  %derived_reloc = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %safepoint_token2,  i32 0, i32 1)
+  %cmp1 = icmp eq ptr addrspace(1) %base_reloc, null
+  %cmp2 = icmp eq ptr addrspace(1) %derived_reloc, null
   %cmp = and i1 %cmp1, %cmp2
   ret i1 %cmp
 }
 
 ; Negative test: Check that relocates from different statepoints are not CSE'd
-define i1 @test_two_calls(i32 addrspace(1)* %in1, i32 addrspace(1)* %in2) gc "statepoint-example" {
+define i1 @test_two_calls(ptr addrspace(1) %in1, ptr addrspace(1) %in2) gc "statepoint-example" {
 ; CHECK-LABEL: @test_two_calls(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[SAFEPOINT_TOKEN:%.*]] = call token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) [ "gc-live"(i32 addrspace(1)* [[IN1:%.*]], i32 addrspace(1)* [[IN2:%.*]]) ]
-; CHECK-NEXT:    [[IN1_RELOC1:%.*]] = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token [[SAFEPOINT_TOKEN]], i32 0, i32 0)
-; CHECK-NEXT:    [[IN2_RELOC1:%.*]] = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token [[SAFEPOINT_TOKEN]], i32 1, i32 1)
-; CHECK-NEXT:    [[SAFEPOINT_TOKEN2:%.*]] = call token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) [ "gc-live"(i32 addrspace(1)* [[IN1_RELOC1]], i32 addrspace(1)* [[IN2_RELOC1]]) ]
-; CHECK-NEXT:    [[IN1_RELOC2:%.*]] = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token [[SAFEPOINT_TOKEN2]], i32 0, i32 1)
-; CHECK-NEXT:    [[CMP1:%.*]] = icmp eq i32 addrspace(1)* [[IN1_RELOC2]], null
+; CHECK-NEXT:    [[SAFEPOINT_TOKEN:%.*]] = call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) [ "gc-live"(ptr addrspace(1) [[IN1:%.*]], ptr addrspace(1) [[IN2:%.*]]) ]
+; CHECK-NEXT:    [[IN1_RELOC1:%.*]] = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token [[SAFEPOINT_TOKEN]], i32 0, i32 0)
+; CHECK-NEXT:    [[IN2_RELOC1:%.*]] = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token [[SAFEPOINT_TOKEN]], i32 1, i32 1)
+; CHECK-NEXT:    [[SAFEPOINT_TOKEN2:%.*]] = call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) [ "gc-live"(ptr addrspace(1) [[IN1_RELOC1]], ptr addrspace(1) [[IN2_RELOC1]]) ]
+; CHECK-NEXT:    [[IN1_RELOC2:%.*]] = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token [[SAFEPOINT_TOKEN2]], i32 0, i32 1)
+; CHECK-NEXT:    [[CMP1:%.*]] = icmp eq ptr addrspace(1) [[IN1_RELOC2]], null
 ; CHECK-NEXT:    ret i1 [[CMP1]]
 ;
 entry:
-  %safepoint_token = call token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) ["gc-live"(i32 addrspace(1)* %in1, i32 addrspace(1)* %in2)]
-  %in1.reloc1 = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %safepoint_token,  i32 0, i32 0)
-  %in2.reloc1 = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %safepoint_token,  i32 1, i32 1)
-  %safepoint_token2 = call token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) ["gc-live"(i32 addrspace(1)* %in1.reloc1, i32 addrspace(1)* %in2.reloc1)]
-  %in1.reloc2 = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %safepoint_token2,  i32 0, i32 1)
-  %in2.reloc2 = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %safepoint_token2,  i32 0, i32 1)
-  %cmp1 = icmp eq i32 addrspace(1)* %in1.reloc2, null
-  %cmp2 = icmp eq i32 addrspace(1)* %in2.reloc2, null
+  %safepoint_token = call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) ["gc-live"(ptr addrspace(1) %in1, ptr addrspace(1) %in2)]
+  %in1.reloc1 = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %safepoint_token,  i32 0, i32 0)
+  %in2.reloc1 = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %safepoint_token,  i32 1, i32 1)
+  %safepoint_token2 = call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) ["gc-live"(ptr addrspace(1) %in1.reloc1, ptr addrspace(1) %in2.reloc1)]
+  %in1.reloc2 = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %safepoint_token2,  i32 0, i32 1)
+  %in2.reloc2 = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %safepoint_token2,  i32 0, i32 1)
+  %cmp1 = icmp eq ptr addrspace(1) %in1.reloc2, null
+  %cmp2 = icmp eq ptr addrspace(1) %in2.reloc2, null
   %cmp = and i1 %cmp1, %cmp2
   ret i1 %cmp
 }
 
 ; Negative test: Check that relocates from normal and exceptional paths are not CSE'd
-define i32 addrspace(1)* @test_invoke(i32 addrspace(1)* %in) gc "statepoint-example" personality i32 ()* @"personality_function" {
+define ptr addrspace(1) @test_invoke(ptr addrspace(1) %in) gc "statepoint-example" personality ptr @"personality_function" {
 ; CHECK-LABEL: @test_invoke(
-; CHECK-NEXT:    [[SAFEPOINT_TOKEN:%.*]] = invoke token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) [ "gc-live"(i32 addrspace(1)* [[IN:%.*]]) ]
+; CHECK-NEXT:    [[SAFEPOINT_TOKEN:%.*]] = invoke token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) [ "gc-live"(ptr addrspace(1) [[IN:%.*]]) ]
 ; CHECK-NEXT:    to label [[INVOKE_NORMAL_DEST:%.*]] unwind label [[EXCEPTIONAL_RETURN:%.*]]
 ; CHECK:       invoke_normal_dest:
-; CHECK-NEXT:    [[OUT:%.*]] = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token [[SAFEPOINT_TOKEN]], i32 0, i32 0)
-; CHECK-NEXT:    ret i32 addrspace(1)* [[OUT]]
+; CHECK-NEXT:    [[OUT:%.*]] = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token [[SAFEPOINT_TOKEN]], i32 0, i32 0)
+; CHECK-NEXT:    ret ptr addrspace(1) [[OUT]]
 ; CHECK:       exceptional_return:
 ; CHECK-NEXT:    [[LANDING_PAD:%.*]] = landingpad token
 ; CHECK-NEXT:    cleanup
-; CHECK-NEXT:    [[OUT1:%.*]] = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token [[LANDING_PAD]], i32 0, i32 0)
-; CHECK-NEXT:    ret i32 addrspace(1)* [[OUT1]]
+; CHECK-NEXT:    [[OUT1:%.*]] = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token [[LANDING_PAD]], i32 0, i32 0)
+; CHECK-NEXT:    ret ptr addrspace(1) [[OUT1]]
 ;
-  %safepoint_token = invoke token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) ["gc-live"(i32 addrspace(1)* %in)]
+  %safepoint_token = invoke token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) ["gc-live"(ptr addrspace(1) %in)]
   to label %invoke_normal_dest unwind label %exceptional_return
 
 invoke_normal_dest:
-  %out = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %safepoint_token,  i32 0, i32 0)
-  ret i32 addrspace(1)* %out
+  %out = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %safepoint_token,  i32 0, i32 0)
+  ret ptr addrspace(1) %out
 
 exceptional_return:
   %landing_pad = landingpad token
   cleanup
-  %out1 = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %landing_pad, i32 0, i32 0)
-  ret i32 addrspace(1)* %out1
+  %out1 = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %landing_pad, i32 0, i32 0)
+  ret ptr addrspace(1) %out1
 }
 
 ; negative test - neither dominates the other
-define i1 @test_non_dominating(i1 %c, i32 addrspace(1)* %in) gc "statepoint-example" {
+define i1 @test_non_dominating(i1 %c, ptr addrspace(1) %in) gc "statepoint-example" {
 ;
 ; CHECK-LABEL: @test_non_dominating(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[SAFEPOINT_TOKEN:%.*]] = call token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) [ "gc-live"(i32 addrspace(1)* [[IN:%.*]]) ]
+; CHECK-NEXT:    [[SAFEPOINT_TOKEN:%.*]] = call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) [ "gc-live"(ptr addrspace(1) [[IN:%.*]]) ]
 ; CHECK-NEXT:    br i1 [[C:%.*]], label [[TAKEN:%.*]], label [[UNTAKEN:%.*]]
 ; CHECK:       taken:
-; CHECK-NEXT:    [[A:%.*]] = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token [[SAFEPOINT_TOKEN]], i32 0, i32 0)
-; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 addrspace(1)* [[A]], null
+; CHECK-NEXT:    [[A:%.*]] = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token [[SAFEPOINT_TOKEN]], i32 0, i32 0)
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq ptr addrspace(1) [[A]], null
 ; CHECK-NEXT:    ret i1 [[CMP]]
 ; CHECK:       untaken:
-; CHECK-NEXT:    [[B:%.*]] = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token [[SAFEPOINT_TOKEN]], i32 0, i32 0)
-; CHECK-NEXT:    [[CMP2:%.*]] = icmp eq i32 addrspace(1)* [[B]], null
+; CHECK-NEXT:    [[B:%.*]] = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token [[SAFEPOINT_TOKEN]], i32 0, i32 0)
+; CHECK-NEXT:    [[CMP2:%.*]] = icmp eq ptr addrspace(1) [[B]], null
 ; CHECK-NEXT:    ret i1 [[CMP2]]
 ;
 entry:
-  %safepoint_token = call token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) ["gc-live"(i32 addrspace(1)* %in)]
+  %safepoint_token = call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) ["gc-live"(ptr addrspace(1) %in)]
   br i1 %c, label %taken, label %untaken
 taken:
-  %a = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %safepoint_token,  i32 0, i32 0)
-  %cmp = icmp eq i32 addrspace(1)* %a, null
+  %a = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %safepoint_token,  i32 0, i32 0)
+  %cmp = icmp eq ptr addrspace(1) %a, null
   ret i1 %cmp
 untaken:
-  %b = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %safepoint_token,  i32 0, i32 0)
-  %cmp2 = icmp eq i32 addrspace(1)* %b, null
+  %b = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %safepoint_token,  i32 0, i32 0)
+  %cmp2 = icmp eq ptr addrspace(1) %b, null
   ret i1 %cmp2
 }
 
-declare token @llvm.experimental.gc.statepoint.p0f_isVoidf(i64, i32, void ()*, i32, i32, ...)
-declare i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token, i32, i32)
+declare token @llvm.experimental.gc.statepoint.p0(i64, i32, ptr, i32, i32, ...)
+declare ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token, i32, i32)

diff  --git a/llvm/test/Transforms/EarlyCSE/getmatchingvalue-crash.ll b/llvm/test/Transforms/EarlyCSE/getmatchingvalue-crash.ll
index 60791e78ba3ae..40a4c6224c54b 100644
--- a/llvm/test/Transforms/EarlyCSE/getmatchingvalue-crash.ll
+++ b/llvm/test/Transforms/EarlyCSE/getmatchingvalue-crash.ll
@@ -9,29 +9,29 @@ target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16
 target triple = "x86_64-unknown-linux-gnu"
 
 %s.0 = type { %s.1 }
-%s.1 = type { %s.2* }
+%s.1 = type { ptr }
 %s.2 = type { %s.3, %s.6, %s.16 }
 %s.3 = type { %s.4, %s.5 }
-%s.4 = type { i32 (...)**, i64 }
-%s.5 = type { i32 (...)** }
-%s.6 = type <{ %s.7, %s.10, i8*, i32, [4 x i8] }>
-%s.7 = type { i32 (...)**, %s.8, i8*, i8*, i8*, i8*, i8*, i8* }
-%s.8 = type { %s.9* }
+%s.4 = type { ptr, i64 }
+%s.5 = type { ptr }
+%s.6 = type <{ %s.7, %s.10, ptr, i32, [4 x i8] }>
+%s.7 = type { ptr, %s.8, ptr, ptr, ptr, ptr, ptr, ptr }
+%s.8 = type { ptr }
 %s.9 = type opaque
 %s.10 = type { %s.11 }
 %s.11 = type { %s.12 }
 %s.12 = type { %s.13 }
 %s.13 = type { %s.14 }
 %s.14 = type { %s.15 }
-%s.15 = type { i64, i64, i8* }
-%s.16 = type <{ %s.17, %s.18*, i32 }>
-%s.17 = type { i32 (...)**, i32, i64, i64, i32, i32, i8*, i8*, void (i32, %s.17*, i32)**, i32*, i64, i64, i64*, i64, i64, i8**, i64, i64 }
-%s.18 = type { i32 (...)**, %s.16 }
+%s.15 = type { i64, i64, ptr }
+%s.16 = type <{ %s.17, ptr, i32 }>
+%s.17 = type { ptr, i32, i64, i64, i32, i32, ptr, ptr, ptr, ptr, i64, i64, ptr, i64, i64, ptr, i64, i64 }
+%s.18 = type { ptr, %s.16 }
 %s.19 = type { i8, %s.20 }
 %s.20 = type { %s.21 }
-%s.21 = type { %s.22*, %s.24, %s.26 }
-%s.22 = type { %s.23* }
-%s.23 = type <{ %s.22, %s.23*, %s.22*, i8, [7 x i8] }>
+%s.21 = type { ptr, %s.24, %s.26 }
+%s.22 = type { ptr }
+%s.23 = type <{ %s.22, ptr, ptr, i8, [7 x i8] }>
 %s.24 = type { %s.25 }
 %s.25 = type { %s.22 }
 %s.26 = type { %s.27 }
@@ -42,103 +42,101 @@ target triple = "x86_64-unknown-linux-gnu"
 declare i32 @f0(...)
 
 ; Function Attrs: uwtable
-declare void @f1(%s.0* nocapture) align 2
+declare void @f1(ptr nocapture) align 2
 
-declare void @f2(%s.10*, %s.2*)
+declare void @f2(ptr, ptr)
 
-declare void @f3(%s.10*, i8*, i32)
+declare void @f3(ptr, ptr, i32)
 
-define i8* @f4(%s.19* %a0, i8* %a1, i32 %a2, i8* %a3) align 2 personality i8* bitcast (i32 (...)* @f0 to i8*) {
+define ptr @f4(ptr %a0, ptr %a1, i32 %a2, ptr %a3) align 2 personality ptr @f0 {
 b0:
   %v0 = alloca %s.0, align 8
   br label %b1
 
 b1:                                               ; preds = %b0
-  invoke void @f5(%s.10* nonnull sret(%s.10) align 8 undef, i8* nonnull undef)
+  invoke void @f5(ptr nonnull sret(%s.10) align 8 undef, ptr nonnull undef)
           to label %b6 unwind label %b3
 
 b2:                                               ; preds = %b2
-  %v1 = invoke nonnull align 8 dereferenceable(24) %s.10* @f6(%s.10* undef, i64 undef, i64 1)
+  %v1 = invoke nonnull align 8 dereferenceable(24) ptr @f6(ptr undef, i64 undef, i64 1)
           to label %b2 unwind label %b4
 
 b3:                                               ; preds = %b1
-  %v2 = landingpad { i8*, i32 }
+  %v2 = landingpad { ptr, i32 }
           cleanup
   br label %b5
 
 b4:                                               ; preds = %b2
-  %v3 = landingpad { i8*, i32 }
+  %v3 = landingpad { ptr, i32 }
           cleanup
   br label %b5
 
 b5:                                               ; preds = %b4, %b3
-  resume { i8*, i32 } undef
+  resume { ptr, i32 } undef
 
 b6:                                               ; preds = %b1
-  invoke void @f1(%s.0* nonnull %v0)
+  invoke void @f1(ptr nonnull %v0)
           to label %b8 unwind label %b7
 
 b7:                                               ; preds = %b6
-  %v4 = landingpad { i8*, i32 }
+  %v4 = landingpad { ptr, i32 }
           cleanup
   br label %b20
 
 b8:                                               ; preds = %b6
-  invoke void @f2(%s.10* sret(%s.10) align 8 undef, %s.2* undef)
+  invoke void @f2(ptr sret(%s.10) align 8 undef, ptr undef)
           to label %b10 unwind label %b14
 
 b9:                                               ; No predecessors!
   br label %b16
 
 b10:                                              ; preds = %b8
-  %v6 = invoke i32 @f7(%s.10* nonnull undef, i64 0, i64 -1, i8* getelementptr inbounds ([1 x i8], [1 x i8]* @g0, i64 0, i64 0), i64 undef)
+  %v6 = invoke i32 @f7(ptr nonnull undef, i64 0, i64 -1, ptr @g0, i64 undef)
           to label %b12 unwind label %b11
 
 b11:                                              ; preds = %b10
-  %v7 = landingpad { i8*, i32 }
-          catch i8* null
+  %v7 = landingpad { ptr, i32 }
+          catch ptr null
   unreachable
 
 b12:                                              ; preds = %b10
-  invoke void @f3(%s.10* nonnull sret(%s.10) align 8 undef, i8* %a1, i32 %a2)
+  invoke void @f3(ptr nonnull sret(%s.10) align 8 undef, ptr %a1, i32 %a2)
           to label %b13 unwind label %b15
 
 b13:                                              ; preds = %b12
   unreachable
 
 b14:                                              ; preds = %b8
-  %v8 = landingpad { i8*, i32 }
+  %v8 = landingpad { ptr, i32 }
           cleanup
   br label %b16
 
 b15:                                              ; preds = %b12
-  %v9 = landingpad { i8*, i32 }
+  %v9 = landingpad { ptr, i32 }
           cleanup
   br label %b16
 
 b16:                                              ; preds = %b15, %b14, %b9
-  %v10 = getelementptr inbounds %s.0, %s.0* %v0, i64 0, i32 0
-  %v11 = getelementptr inbounds %s.1, %s.1* %v10, i64 0, i32 0
   br label %b17
 
 b17:                                              ; preds = %b16
-  %v12 = load %s.2*, %s.2** %v11, align 8
+  %v12 = load ptr, ptr %v0, align 8
   br label %b18
 
 b18:                                              ; preds = %b17
-  call void undef(%s.2* nonnull %v12)
+  call void undef(ptr nonnull %v12)
   br label %b19
 
 b19:                                              ; preds = %b18
-  store %s.2* null, %s.2** %v11, align 8
+  store ptr null, ptr %v0, align 8
   br label %b20
 
 b20:                                              ; preds = %b19, %b7
-  resume { i8*, i32 } undef
+  resume { ptr, i32 } undef
 }
 
-declare hidden void @f5(%s.10*, i8*)
+declare hidden void @f5(ptr, ptr)
 
-declare %s.10* @f6(%s.10*, i64, i64)
+declare ptr @f6(ptr, i64, i64)
 
-declare i32 @f7(%s.10*, i64, i64, i8*, i64)
+declare i32 @f7(ptr, i64, i64, ptr, i64)

diff  --git a/llvm/test/Transforms/EarlyCSE/guards.ll b/llvm/test/Transforms/EarlyCSE/guards.ll
index 6a1bef9852061..15260715334e1 100644
--- a/llvm/test/Transforms/EarlyCSE/guards.ll
+++ b/llvm/test/Transforms/EarlyCSE/guards.ll
@@ -7,44 +7,44 @@ declare void @llvm.experimental.guard(i1,...)
 
 declare void @llvm.assume(i1)
 
-define i32 @test0(i32* %ptr, i1 %cond) {
+define i32 @test0(ptr %ptr, i1 %cond) {
 ; We can do store to load forwarding over a guard, since it does not
 ; clobber memory
 ; NO_ASSUME-LABEL: @test0(
-; NO_ASSUME-NEXT:    store i32 40, i32* [[PTR:%.*]], align 4
+; NO_ASSUME-NEXT:    store i32 40, ptr [[PTR:%.*]], align 4
 ; NO_ASSUME-NEXT:    call void (i1, ...) @llvm.experimental.guard(i1 [[COND:%.*]]) [ "deopt"() ]
 ; NO_ASSUME-NEXT:    ret i32 40
 ;
 ; USE_ASSUME-LABEL: @test0(
-; USE_ASSUME-NEXT:    store i32 40, i32* [[PTR:%.*]], align 4
+; USE_ASSUME-NEXT:    store i32 40, ptr [[PTR:%.*]], align 4
 ; USE_ASSUME-NEXT:    call void (i1, ...) @llvm.experimental.guard(i1 [[COND:%.*]]) [ "deopt"() ]
-; USE_ASSUME-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[PTR]], i64 4), "nonnull"(i32* [[PTR]]), "align"(i32* [[PTR]], i64 4) ]
+; USE_ASSUME-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[PTR]], i64 4), "nonnull"(ptr [[PTR]]), "align"(ptr [[PTR]], i64 4) ]
 ; USE_ASSUME-NEXT:    ret i32 40
 ;
 
-  store i32 40, i32* %ptr
+  store i32 40, ptr %ptr
   call void(i1,...) @llvm.experimental.guard(i1 %cond) [ "deopt"() ]
-  %rval = load i32, i32* %ptr
+  %rval = load i32, ptr %ptr
   ret i32 %rval
 }
 
-define i32 @test1(i32* %val, i1 %cond) {
+define i32 @test1(ptr %val, i1 %cond) {
 ; We can CSE loads over a guard, since it does not clobber memory
 ; NO_ASSUME-LABEL: @test1(
-; NO_ASSUME-NEXT:    [[VAL0:%.*]] = load i32, i32* [[VAL:%.*]], align 4
+; NO_ASSUME-NEXT:    [[VAL0:%.*]] = load i32, ptr [[VAL:%.*]], align 4
 ; NO_ASSUME-NEXT:    call void (i1, ...) @llvm.experimental.guard(i1 [[COND:%.*]]) [ "deopt"() ]
 ; NO_ASSUME-NEXT:    ret i32 0
 ;
 ; USE_ASSUME-LABEL: @test1(
-; USE_ASSUME-NEXT:    [[VAL0:%.*]] = load i32, i32* [[VAL:%.*]], align 4
+; USE_ASSUME-NEXT:    [[VAL0:%.*]] = load i32, ptr [[VAL:%.*]], align 4
 ; USE_ASSUME-NEXT:    call void (i1, ...) @llvm.experimental.guard(i1 [[COND:%.*]]) [ "deopt"() ]
-; USE_ASSUME-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[VAL]], i64 4), "nonnull"(i32* [[VAL]]), "align"(i32* [[VAL]], i64 4) ]
+; USE_ASSUME-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[VAL]], i64 4), "nonnull"(ptr [[VAL]]), "align"(ptr [[VAL]], i64 4) ]
 ; USE_ASSUME-NEXT:    ret i32 0
 ;
 
-  %val0 = load i32, i32* %val
+  %val0 = load i32, ptr %val
   call void(i1,...) @llvm.experimental.guard(i1 %cond) [ "deopt"() ]
-  %val1 = load i32, i32* %val
+  %val1 = load i32, ptr %val
   %rval = sub i32 %val0, %val1
   ret i32 %rval
 }
@@ -180,21 +180,21 @@ right:
   br label %left
 }
 
-define void @test6(i1 %c, i32* %ptr) {
+define void @test6(i1 %c, ptr %ptr) {
 ; Check that we do not DSE over calls to @llvm.experimental.guard.
 ; Guard intrinsics do _read_ memory, so the call to guard below needs
 ; to see the store of 500 to %ptr
 ; CHECK-LABEL: @test6(
-; CHECK-NEXT:    store i32 500, i32* [[PTR:%.*]], align 4
+; CHECK-NEXT:    store i32 500, ptr [[PTR:%.*]], align 4
 ; CHECK-NEXT:    call void (i1, ...) @llvm.experimental.guard(i1 [[C:%.*]]) [ "deopt"() ]
-; CHECK-NEXT:    store i32 600, i32* [[PTR]], align 4
+; CHECK-NEXT:    store i32 600, ptr [[PTR]], align 4
 ; CHECK-NEXT:    ret void
 ;
 
 
-  store i32 500, i32* %ptr
+  store i32 500, ptr %ptr
   call void(i1,...) @llvm.experimental.guard(i1 %c) [ "deopt"() ]
-  store i32 600, i32* %ptr
+  store i32 600, ptr %ptr
   ret void
 }
 
@@ -214,52 +214,52 @@ define void @test07(i32 %a, i32 %b) {
   ret void
 }
 
-define void @test08(i32 %a, i32 %b, i32* %ptr) {
+define void @test08(i32 %a, i32 %b, ptr %ptr) {
 ; Check that we deal correctly with stores when removing guards in the same
 ; block in case when the condition is not recalculated.
 ; NO_ASSUME-LABEL: @test08(
 ; NO_ASSUME-NEXT:    [[CMP:%.*]] = icmp eq i32 [[A:%.*]], [[B:%.*]]
-; NO_ASSUME-NEXT:    store i32 100, i32* [[PTR:%.*]], align 4
+; NO_ASSUME-NEXT:    store i32 100, ptr [[PTR:%.*]], align 4
 ; NO_ASSUME-NEXT:    call void (i1, ...) @llvm.experimental.guard(i1 [[CMP]]) [ "deopt"() ]
-; NO_ASSUME-NEXT:    store i32 400, i32* [[PTR]], align 4
+; NO_ASSUME-NEXT:    store i32 400, ptr [[PTR]], align 4
 ; NO_ASSUME-NEXT:    ret void
 ;
 ; USE_ASSUME-LABEL: @test08(
 ; USE_ASSUME-NEXT:    [[CMP:%.*]] = icmp eq i32 [[A:%.*]], [[B:%.*]]
-; USE_ASSUME-NEXT:    store i32 100, i32* [[PTR:%.*]], align 4
+; USE_ASSUME-NEXT:    store i32 100, ptr [[PTR:%.*]], align 4
 ; USE_ASSUME-NEXT:    call void (i1, ...) @llvm.experimental.guard(i1 [[CMP]]) [ "deopt"() ]
-; USE_ASSUME-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[PTR]], i64 4), "nonnull"(i32* [[PTR]]), "align"(i32* [[PTR]], i64 4) ]
-; USE_ASSUME-NEXT:    store i32 400, i32* [[PTR]], align 4
+; USE_ASSUME-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[PTR]], i64 4), "nonnull"(ptr [[PTR]]), "align"(ptr [[PTR]], i64 4) ]
+; USE_ASSUME-NEXT:    store i32 400, ptr [[PTR]], align 4
 ; USE_ASSUME-NEXT:    ret void
 ;
 
   %cmp = icmp eq i32 %a, %b
-  store i32 100, i32* %ptr
+  store i32 100, ptr %ptr
   call void (i1, ...) @llvm.experimental.guard(i1 %cmp) [ "deopt"() ]
-  store i32 200, i32* %ptr
+  store i32 200, ptr %ptr
   call void (i1, ...) @llvm.experimental.guard(i1 %cmp) [ "deopt"() ]
-  store i32 300, i32* %ptr
+  store i32 300, ptr %ptr
   call void (i1, ...) @llvm.experimental.guard(i1 %cmp) [ "deopt"() ]
-  store i32 400, i32* %ptr
+  store i32 400, ptr %ptr
   ret void
 }
 
-define void @test09(i32 %a, i32 %b, i1 %c, i32* %ptr) {
+define void @test09(i32 %a, i32 %b, i1 %c, ptr %ptr) {
 ; Similar to test08, but with more control flow.
 ; TODO: Can we get rid of the store in the end of entry given that it is
 ; post-dominated by other stores?
 ; NO_ASSUME-LABEL: @test09(
 ; NO_ASSUME-NEXT:  entry:
 ; NO_ASSUME-NEXT:    [[CMP:%.*]] = icmp eq i32 [[A:%.*]], [[B:%.*]]
-; NO_ASSUME-NEXT:    store i32 100, i32* [[PTR:%.*]], align 4
+; NO_ASSUME-NEXT:    store i32 100, ptr [[PTR:%.*]], align 4
 ; NO_ASSUME-NEXT:    call void (i1, ...) @llvm.experimental.guard(i1 [[CMP]]) [ "deopt"() ]
-; NO_ASSUME-NEXT:    store i32 400, i32* [[PTR]], align 4
+; NO_ASSUME-NEXT:    store i32 400, ptr [[PTR]], align 4
 ; NO_ASSUME-NEXT:    br i1 [[C:%.*]], label [[IF_TRUE:%.*]], label [[IF_FALSE:%.*]]
 ; NO_ASSUME:       if.true:
-; NO_ASSUME-NEXT:    store i32 500, i32* [[PTR]], align 4
+; NO_ASSUME-NEXT:    store i32 500, ptr [[PTR]], align 4
 ; NO_ASSUME-NEXT:    br label [[MERGE:%.*]]
 ; NO_ASSUME:       if.false:
-; NO_ASSUME-NEXT:    store i32 600, i32* [[PTR]], align 4
+; NO_ASSUME-NEXT:    store i32 600, ptr [[PTR]], align 4
 ; NO_ASSUME-NEXT:    br label [[MERGE]]
 ; NO_ASSUME:       merge:
 ; NO_ASSUME-NEXT:    ret void
@@ -267,16 +267,16 @@ define void @test09(i32 %a, i32 %b, i1 %c, i32* %ptr) {
 ; USE_ASSUME-LABEL: @test09(
 ; USE_ASSUME-NEXT:  entry:
 ; USE_ASSUME-NEXT:    [[CMP:%.*]] = icmp eq i32 [[A:%.*]], [[B:%.*]]
-; USE_ASSUME-NEXT:    store i32 100, i32* [[PTR:%.*]], align 4
+; USE_ASSUME-NEXT:    store i32 100, ptr [[PTR:%.*]], align 4
 ; USE_ASSUME-NEXT:    call void (i1, ...) @llvm.experimental.guard(i1 [[CMP]]) [ "deopt"() ]
-; USE_ASSUME-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[PTR]], i64 4), "nonnull"(i32* [[PTR]]), "align"(i32* [[PTR]], i64 4) ]
-; USE_ASSUME-NEXT:    store i32 400, i32* [[PTR]], align 4
+; USE_ASSUME-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[PTR]], i64 4), "nonnull"(ptr [[PTR]]), "align"(ptr [[PTR]], i64 4) ]
+; USE_ASSUME-NEXT:    store i32 400, ptr [[PTR]], align 4
 ; USE_ASSUME-NEXT:    br i1 [[C:%.*]], label [[IF_TRUE:%.*]], label [[IF_FALSE:%.*]]
 ; USE_ASSUME:       if.true:
-; USE_ASSUME-NEXT:    store i32 500, i32* [[PTR]], align 4
+; USE_ASSUME-NEXT:    store i32 500, ptr [[PTR]], align 4
 ; USE_ASSUME-NEXT:    br label [[MERGE:%.*]]
 ; USE_ASSUME:       if.false:
-; USE_ASSUME-NEXT:    store i32 600, i32* [[PTR]], align 4
+; USE_ASSUME-NEXT:    store i32 600, ptr [[PTR]], align 4
 ; USE_ASSUME-NEXT:    br label [[MERGE]]
 ; USE_ASSUME:       merge:
 ; USE_ASSUME-NEXT:    ret void
@@ -284,30 +284,30 @@ define void @test09(i32 %a, i32 %b, i1 %c, i32* %ptr) {
 
 entry:
   %cmp = icmp eq i32 %a, %b
-  store i32 100, i32* %ptr
+  store i32 100, ptr %ptr
   call void (i1, ...) @llvm.experimental.guard(i1 %cmp) [ "deopt"() ]
-  store i32 200, i32* %ptr
+  store i32 200, ptr %ptr
   call void (i1, ...) @llvm.experimental.guard(i1 %cmp) [ "deopt"() ]
-  store i32 300, i32* %ptr
+  store i32 300, ptr %ptr
   call void (i1, ...) @llvm.experimental.guard(i1 %cmp) [ "deopt"() ]
-  store i32 400, i32* %ptr
+  store i32 400, ptr %ptr
   br i1 %c, label %if.true, label %if.false
 
 if.true:
   call void (i1, ...) @llvm.experimental.guard(i1 %cmp) [ "deopt"() ]
-  store i32 500, i32* %ptr
+  store i32 500, ptr %ptr
   br label %merge
 
 if.false:
   call void (i1, ...) @llvm.experimental.guard(i1 %cmp) [ "deopt"() ]
-  store i32 600, i32* %ptr
+  store i32 600, ptr %ptr
   br label %merge
 
 merge:
   ret void
 }
 
-define void @test10(i32 %a, i32 %b, i1 %c, i32* %ptr) {
+define void @test10(i32 %a, i32 %b, i1 %c, ptr %ptr) {
 ; Make sure that non-dominating guards do not cause other guards removal.
 ; CHECK-LABEL: @test10(
 ; CHECK-NEXT:  entry:
@@ -315,15 +315,15 @@ define void @test10(i32 %a, i32 %b, i1 %c, i32* %ptr) {
 ; CHECK-NEXT:    br i1 [[C:%.*]], label [[IF_TRUE:%.*]], label [[IF_FALSE:%.*]]
 ; CHECK:       if.true:
 ; CHECK-NEXT:    call void (i1, ...) @llvm.experimental.guard(i1 [[CMP]]) [ "deopt"() ]
-; CHECK-NEXT:    store i32 100, i32* [[PTR:%.*]], align 4
+; CHECK-NEXT:    store i32 100, ptr [[PTR:%.*]], align 4
 ; CHECK-NEXT:    br label [[MERGE:%.*]]
 ; CHECK:       if.false:
-; CHECK-NEXT:    store i32 200, i32* [[PTR]], align 4
+; CHECK-NEXT:    store i32 200, ptr [[PTR]], align 4
 ; CHECK-NEXT:    br label [[MERGE]]
 ; CHECK:       merge:
-; CHECK-NEXT:    store i32 300, i32* [[PTR]], align 4
+; CHECK-NEXT:    store i32 300, ptr [[PTR]], align 4
 ; CHECK-NEXT:    call void (i1, ...) @llvm.experimental.guard(i1 [[CMP]]) [ "deopt"() ]
-; CHECK-NEXT:    store i32 400, i32* [[PTR]], align 4
+; CHECK-NEXT:    store i32 400, ptr [[PTR]], align 4
 ; CHECK-NEXT:    ret void
 ;
 
@@ -333,22 +333,22 @@ entry:
 
 if.true:
   call void (i1, ...) @llvm.experimental.guard(i1 %cmp) [ "deopt"() ]
-  store i32 100, i32* %ptr
+  store i32 100, ptr %ptr
   call void (i1, ...) @llvm.experimental.guard(i1 %cmp) [ "deopt"() ]
   br label %merge
 
 if.false:
-  store i32 200, i32* %ptr
+  store i32 200, ptr %ptr
   br label %merge
 
 merge:
-  store i32 300, i32* %ptr
+  store i32 300, ptr %ptr
   call void (i1, ...) @llvm.experimental.guard(i1 %cmp) [ "deopt"() ]
-  store i32 400, i32* %ptr
+  store i32 400, ptr %ptr
   ret void
 }
 
-define void @test11(i32 %a, i32 %b, i32* %ptr) {
+define void @test11(i32 %a, i32 %b, ptr %ptr) {
 ; Make sure that branching condition is applied to guards.
 ; CHECK-LABEL: @test11(
 ; CHECK-NEXT:  entry:
@@ -396,35 +396,35 @@ define void @test12(i32 %a, i32 %b) {
   ret void
 }
 
-define void @test13(i32 %a, i32 %b, i32* %ptr) {
+define void @test13(i32 %a, i32 %b, ptr %ptr) {
 ; Check that we deal correctly with stores when removing guards due to assume.
 ; NO_ASSUME-LABEL: @test13(
 ; NO_ASSUME-NEXT:    [[CMP:%.*]] = icmp eq i32 [[A:%.*]], [[B:%.*]]
 ; NO_ASSUME-NEXT:    call void @llvm.assume(i1 [[CMP]])
-; NO_ASSUME-NEXT:    store i32 400, i32* [[PTR:%.*]], align 4
+; NO_ASSUME-NEXT:    store i32 400, ptr [[PTR:%.*]], align 4
 ; NO_ASSUME-NEXT:    ret void
 ;
 ; USE_ASSUME-LABEL: @test13(
 ; USE_ASSUME-NEXT:    [[CMP:%.*]] = icmp eq i32 [[A:%.*]], [[B:%.*]]
 ; USE_ASSUME-NEXT:    call void @llvm.assume(i1 [[CMP]])
-; USE_ASSUME-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[PTR:%.*]], i64 4), "nonnull"(i32* [[PTR]]), "align"(i32* [[PTR]], i64 4) ]
-; USE_ASSUME-NEXT:    store i32 400, i32* [[PTR]], align 4
+; USE_ASSUME-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[PTR:%.*]], i64 4), "nonnull"(ptr [[PTR]]), "align"(ptr [[PTR]], i64 4) ]
+; USE_ASSUME-NEXT:    store i32 400, ptr [[PTR]], align 4
 ; USE_ASSUME-NEXT:    ret void
 ;
 
   %cmp = icmp eq i32 %a, %b
   call void @llvm.assume(i1 %cmp)
-  store i32 100, i32* %ptr
+  store i32 100, ptr %ptr
   call void (i1, ...) @llvm.experimental.guard(i1 %cmp) [ "deopt"() ]
-  store i32 200, i32* %ptr
+  store i32 200, ptr %ptr
   call void (i1, ...) @llvm.experimental.guard(i1 %cmp) [ "deopt"() ]
-  store i32 300, i32* %ptr
+  store i32 300, ptr %ptr
   call void (i1, ...) @llvm.experimental.guard(i1 %cmp) [ "deopt"() ]
-  store i32 400, i32* %ptr
+  store i32 400, ptr %ptr
   ret void
 }
 
-define void @test14(i32 %a, i32 %b, i1 %c, i32* %ptr) {
+define void @test14(i32 %a, i32 %b, i1 %c, ptr %ptr) {
 ; Similar to test13, but with more control flow.
 ; TODO: Can we get rid of the store at the end of entry given that it is
 ; post-dominated by other stores?
@@ -432,13 +432,13 @@ define void @test14(i32 %a, i32 %b, i1 %c, i32* %ptr) {
 ; NO_ASSUME-NEXT:  entry:
 ; NO_ASSUME-NEXT:    [[CMP:%.*]] = icmp eq i32 [[A:%.*]], [[B:%.*]]
 ; NO_ASSUME-NEXT:    call void @llvm.assume(i1 [[CMP]])
-; NO_ASSUME-NEXT:    store i32 400, i32* [[PTR:%.*]], align 4
+; NO_ASSUME-NEXT:    store i32 400, ptr [[PTR:%.*]], align 4
 ; NO_ASSUME-NEXT:    br i1 [[C:%.*]], label [[IF_TRUE:%.*]], label [[IF_FALSE:%.*]]
 ; NO_ASSUME:       if.true:
-; NO_ASSUME-NEXT:    store i32 500, i32* [[PTR]], align 4
+; NO_ASSUME-NEXT:    store i32 500, ptr [[PTR]], align 4
 ; NO_ASSUME-NEXT:    br label [[MERGE:%.*]]
 ; NO_ASSUME:       if.false:
-; NO_ASSUME-NEXT:    store i32 600, i32* [[PTR]], align 4
+; NO_ASSUME-NEXT:    store i32 600, ptr [[PTR]], align 4
 ; NO_ASSUME-NEXT:    br label [[MERGE]]
 ; NO_ASSUME:       merge:
 ; NO_ASSUME-NEXT:    ret void
@@ -447,14 +447,14 @@ define void @test14(i32 %a, i32 %b, i1 %c, i32* %ptr) {
 ; USE_ASSUME-NEXT:  entry:
 ; USE_ASSUME-NEXT:    [[CMP:%.*]] = icmp eq i32 [[A:%.*]], [[B:%.*]]
 ; USE_ASSUME-NEXT:    call void @llvm.assume(i1 [[CMP]])
-; USE_ASSUME-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[PTR:%.*]], i64 4), "nonnull"(i32* [[PTR]]), "align"(i32* [[PTR]], i64 4) ]
-; USE_ASSUME-NEXT:    store i32 400, i32* [[PTR]], align 4
+; USE_ASSUME-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[PTR:%.*]], i64 4), "nonnull"(ptr [[PTR]]), "align"(ptr [[PTR]], i64 4) ]
+; USE_ASSUME-NEXT:    store i32 400, ptr [[PTR]], align 4
 ; USE_ASSUME-NEXT:    br i1 [[C:%.*]], label [[IF_TRUE:%.*]], label [[IF_FALSE:%.*]]
 ; USE_ASSUME:       if.true:
-; USE_ASSUME-NEXT:    store i32 500, i32* [[PTR]], align 4
+; USE_ASSUME-NEXT:    store i32 500, ptr [[PTR]], align 4
 ; USE_ASSUME-NEXT:    br label [[MERGE:%.*]]
 ; USE_ASSUME:       if.false:
-; USE_ASSUME-NEXT:    store i32 600, i32* [[PTR]], align 4
+; USE_ASSUME-NEXT:    store i32 600, ptr [[PTR]], align 4
 ; USE_ASSUME-NEXT:    br label [[MERGE]]
 ; USE_ASSUME:       merge:
 ; USE_ASSUME-NEXT:    ret void
@@ -463,30 +463,30 @@ define void @test14(i32 %a, i32 %b, i1 %c, i32* %ptr) {
 entry:
   %cmp = icmp eq i32 %a, %b
   call void @llvm.assume(i1 %cmp)
-  store i32 100, i32* %ptr
+  store i32 100, ptr %ptr
   call void (i1, ...) @llvm.experimental.guard(i1 %cmp) [ "deopt"() ]
-  store i32 200, i32* %ptr
+  store i32 200, ptr %ptr
   call void (i1, ...) @llvm.experimental.guard(i1 %cmp) [ "deopt"() ]
-  store i32 300, i32* %ptr
+  store i32 300, ptr %ptr
   call void (i1, ...) @llvm.experimental.guard(i1 %cmp) [ "deopt"() ]
-  store i32 400, i32* %ptr
+  store i32 400, ptr %ptr
   br i1 %c, label %if.true, label %if.false
 
 if.true:
   call void (i1, ...) @llvm.experimental.guard(i1 %cmp) [ "deopt"() ]
-  store i32 500, i32* %ptr
+  store i32 500, ptr %ptr
   br label %merge
 
 if.false:
   call void (i1, ...) @llvm.experimental.guard(i1 %cmp) [ "deopt"() ]
-  store i32 600, i32* %ptr
+  store i32 600, ptr %ptr
   br label %merge
 
 merge:
   ret void
 }
 
-define void @test15(i32 %a, i32 %b, i1 %c, i32* %ptr) {
+define void @test15(i32 %a, i32 %b, i1 %c, ptr %ptr) {
 ; Make sure that non-dominating assumes do not cause guards removal.
 ; CHECK-LABEL: @test15(
 ; CHECK-NEXT:  entry:
@@ -494,15 +494,15 @@ define void @test15(i32 %a, i32 %b, i1 %c, i32* %ptr) {
 ; CHECK-NEXT:    br i1 [[C:%.*]], label [[IF_TRUE:%.*]], label [[IF_FALSE:%.*]]
 ; CHECK:       if.true:
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
-; CHECK-NEXT:    store i32 100, i32* [[PTR:%.*]], align 4
+; CHECK-NEXT:    store i32 100, ptr [[PTR:%.*]], align 4
 ; CHECK-NEXT:    br label [[MERGE:%.*]]
 ; CHECK:       if.false:
-; CHECK-NEXT:    store i32 200, i32* [[PTR]], align 4
+; CHECK-NEXT:    store i32 200, ptr [[PTR]], align 4
 ; CHECK-NEXT:    br label [[MERGE]]
 ; CHECK:       merge:
-; CHECK-NEXT:    store i32 300, i32* [[PTR]], align 4
+; CHECK-NEXT:    store i32 300, ptr [[PTR]], align 4
 ; CHECK-NEXT:    call void (i1, ...) @llvm.experimental.guard(i1 [[CMP]]) [ "deopt"() ]
-; CHECK-NEXT:    store i32 400, i32* [[PTR]], align 4
+; CHECK-NEXT:    store i32 400, ptr [[PTR]], align 4
 ; CHECK-NEXT:    ret void
 ;
 
@@ -512,18 +512,18 @@ entry:
 
 if.true:
   call void @llvm.assume(i1 %cmp)
-  store i32 100, i32* %ptr
+  store i32 100, ptr %ptr
   call void (i1, ...) @llvm.experimental.guard(i1 %cmp) [ "deopt"() ]
   br label %merge
 
 if.false:
-  store i32 200, i32* %ptr
+  store i32 200, ptr %ptr
   br label %merge
 
 merge:
-  store i32 300, i32* %ptr
+  store i32 300, ptr %ptr
   call void (i1, ...) @llvm.experimental.guard(i1 %cmp) [ "deopt"() ]
-  store i32 400, i32* %ptr
+  store i32 400, ptr %ptr
   ret void
 }
 
@@ -543,7 +543,7 @@ define void @test16(i32 %a, i32 %b) {
   ret void
 }
 
-define void @test17(i32 %a, i32 %b, i1 %c, i32* %ptr) {
+define void @test17(i32 %a, i32 %b, i1 %c, ptr %ptr) {
 ; Check that we don't bother to do anything with assumes even if we know the
 ; condition to be true or false (includes some control flow).
 ; CHECK-LABEL: @test17(

diff  --git a/llvm/test/Transforms/EarlyCSE/int_sideeffect.ll b/llvm/test/Transforms/EarlyCSE/int_sideeffect.ll
index f4d8fd25a63c3..c411756059b56 100644
--- a/llvm/test/Transforms/EarlyCSE/int_sideeffect.ll
+++ b/llvm/test/Transforms/EarlyCSE/int_sideeffect.ll
@@ -6,10 +6,10 @@ declare void @llvm.sideeffect()
 
 ; CHECK-LABEL: s2l
 ; CHECK-NOT: load
-define float @s2l(float* %p) {
-    store float 0.0, float* %p
+define float @s2l(ptr %p) {
+    store float 0.0, ptr %p
     call void @llvm.sideeffect()
-    %t = load float, float* %p
+    %t = load float, ptr %p
     ret float %t
 }
 
@@ -18,10 +18,10 @@ define float @s2l(float* %p) {
 ; CHECK-LABEL: rle
 ; CHECK: load
 ; CHECK-NOT: load
-define float @rle(float* %p) {
-    %r = load float, float* %p
+define float @rle(ptr %p) {
+    %r = load float, ptr %p
     call void @llvm.sideeffect()
-    %s = load float, float* %p
+    %s = load float, ptr %p
     %t = fadd float %r, %s
     ret float %t
 }

diff  --git a/llvm/test/Transforms/EarlyCSE/invariant-loads.ll b/llvm/test/Transforms/EarlyCSE/invariant-loads.ll
index df76fd2d61bed..0eca245a5f9a1 100644
--- a/llvm/test/Transforms/EarlyCSE/invariant-loads.ll
+++ b/llvm/test/Transforms/EarlyCSE/invariant-loads.ll
@@ -5,81 +5,81 @@
 
 declare void @clobber_and_use(i32)
 
-define void @f_0(i32* %ptr) {
+define void @f_0(ptr %ptr) {
 ; NO_ASSUME-LABEL: @f_0(
-; NO_ASSUME-NEXT:    [[VAL0:%.*]] = load i32, i32* [[PTR:%.*]], align 4, !invariant.load !0
+; NO_ASSUME-NEXT:    [[VAL0:%.*]] = load i32, ptr [[PTR:%.*]], align 4, !invariant.load !0
 ; NO_ASSUME-NEXT:    call void @clobber_and_use(i32 [[VAL0]])
 ; NO_ASSUME-NEXT:    call void @clobber_and_use(i32 [[VAL0]])
 ; NO_ASSUME-NEXT:    call void @clobber_and_use(i32 [[VAL0]])
 ; NO_ASSUME-NEXT:    ret void
 ;
 ; USE_ASSUME-LABEL: @f_0(
-; USE_ASSUME-NEXT:    [[VAL0:%.*]] = load i32, i32* [[PTR:%.*]], align 4, !invariant.load !0
+; USE_ASSUME-NEXT:    [[VAL0:%.*]] = load i32, ptr [[PTR:%.*]], align 4, !invariant.load !0
 ; USE_ASSUME-NEXT:    call void @clobber_and_use(i32 [[VAL0]])
-; USE_ASSUME-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[PTR]], i64 4), "nonnull"(i32* [[PTR]]), "align"(i32* [[PTR]], i64 4) ]
+; USE_ASSUME-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[PTR]], i64 4), "nonnull"(ptr [[PTR]]), "align"(ptr [[PTR]], i64 4) ]
 ; USE_ASSUME-NEXT:    call void @clobber_and_use(i32 [[VAL0]])
 ; USE_ASSUME-NEXT:    call void @clobber_and_use(i32 [[VAL0]])
 ; USE_ASSUME-NEXT:    ret void
 ;
 
-  %val0 = load i32, i32* %ptr, !invariant.load !{}
+  %val0 = load i32, ptr %ptr, !invariant.load !{}
   call void @clobber_and_use(i32 %val0)
-  %val1 = load i32, i32* %ptr, !invariant.load !{}
+  %val1 = load i32, ptr %ptr, !invariant.load !{}
   call void @clobber_and_use(i32 %val1)
-  %val2 = load i32, i32* %ptr, !invariant.load !{}
+  %val2 = load i32, ptr %ptr, !invariant.load !{}
   call void @clobber_and_use(i32 %val2)
   ret void
 }
 
-define void @f_1(i32* %ptr) {
+define void @f_1(ptr %ptr) {
 ; We can forward invariant loads to non-invariant loads.
 ; NO_ASSUME-LABEL: @f_1(
-; NO_ASSUME-NEXT:    [[VAL0:%.*]] = load i32, i32* [[PTR:%.*]], align 4, !invariant.load !0
+; NO_ASSUME-NEXT:    [[VAL0:%.*]] = load i32, ptr [[PTR:%.*]], align 4, !invariant.load !0
 ; NO_ASSUME-NEXT:    call void @clobber_and_use(i32 [[VAL0]])
 ; NO_ASSUME-NEXT:    call void @clobber_and_use(i32 [[VAL0]])
 ; NO_ASSUME-NEXT:    ret void
 ;
 ; USE_ASSUME-LABEL: @f_1(
-; USE_ASSUME-NEXT:    [[VAL0:%.*]] = load i32, i32* [[PTR:%.*]], align 4, !invariant.load !0
+; USE_ASSUME-NEXT:    [[VAL0:%.*]] = load i32, ptr [[PTR:%.*]], align 4, !invariant.load !0
 ; USE_ASSUME-NEXT:    call void @clobber_and_use(i32 [[VAL0]])
-; USE_ASSUME-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[PTR]], i64 4), "nonnull"(i32* [[PTR]]), "align"(i32* [[PTR]], i64 4) ]
+; USE_ASSUME-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[PTR]], i64 4), "nonnull"(ptr [[PTR]]), "align"(ptr [[PTR]], i64 4) ]
 ; USE_ASSUME-NEXT:    call void @clobber_and_use(i32 [[VAL0]])
 ; USE_ASSUME-NEXT:    ret void
 ;
 
-  %val0 = load i32, i32* %ptr, !invariant.load !{}
+  %val0 = load i32, ptr %ptr, !invariant.load !{}
   call void @clobber_and_use(i32 %val0)
-  %val1 = load i32, i32* %ptr
+  %val1 = load i32, ptr %ptr
   call void @clobber_and_use(i32 %val1)
   ret void
 }
 
-define void @f_2(i32* %ptr) {
+define void @f_2(ptr %ptr) {
 ; We can forward a non-invariant load into an invariant load.
 ; NO_ASSUME-LABEL: @f_2(
-; NO_ASSUME-NEXT:    [[VAL0:%.*]] = load i32, i32* [[PTR:%.*]], align 4
+; NO_ASSUME-NEXT:    [[VAL0:%.*]] = load i32, ptr [[PTR:%.*]], align 4
 ; NO_ASSUME-NEXT:    call void @clobber_and_use(i32 [[VAL0]])
 ; NO_ASSUME-NEXT:    call void @clobber_and_use(i32 [[VAL0]])
 ; NO_ASSUME-NEXT:    ret void
 ;
 ; USE_ASSUME-LABEL: @f_2(
-; USE_ASSUME-NEXT:    [[VAL0:%.*]] = load i32, i32* [[PTR:%.*]], align 4
+; USE_ASSUME-NEXT:    [[VAL0:%.*]] = load i32, ptr [[PTR:%.*]], align 4
 ; USE_ASSUME-NEXT:    call void @clobber_and_use(i32 [[VAL0]])
-; USE_ASSUME-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[PTR]], i64 4), "nonnull"(i32* [[PTR]]), "align"(i32* [[PTR]], i64 4) ]
+; USE_ASSUME-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[PTR]], i64 4), "nonnull"(ptr [[PTR]]), "align"(ptr [[PTR]], i64 4) ]
 ; USE_ASSUME-NEXT:    call void @clobber_and_use(i32 [[VAL0]])
 ; USE_ASSUME-NEXT:    ret void
 ;
 
-  %val0 = load i32, i32* %ptr
+  %val0 = load i32, ptr %ptr
   call void @clobber_and_use(i32 %val0)
-  %val1 = load i32, i32* %ptr, !invariant.load !{}
+  %val1 = load i32, ptr %ptr, !invariant.load !{}
   call void @clobber_and_use(i32 %val1)
   ret void
 }
 
-define void @f_3(i1 %cond, i32* %ptr) {
+define void @f_3(i1 %cond, ptr %ptr) {
 ; NO_ASSUME-LABEL: @f_3(
-; NO_ASSUME-NEXT:    [[VAL0:%.*]] = load i32, i32* [[PTR:%.*]], align 4, !invariant.load !0
+; NO_ASSUME-NEXT:    [[VAL0:%.*]] = load i32, ptr [[PTR:%.*]], align 4, !invariant.load !0
 ; NO_ASSUME-NEXT:    call void @clobber_and_use(i32 [[VAL0]])
 ; NO_ASSUME-NEXT:    br i1 [[COND:%.*]], label [[LEFT:%.*]], label [[RIGHT:%.*]]
 ; NO_ASSUME:       left:
@@ -89,23 +89,23 @@ define void @f_3(i1 %cond, i32* %ptr) {
 ; NO_ASSUME-NEXT:    ret void
 ;
 ; USE_ASSUME-LABEL: @f_3(
-; USE_ASSUME-NEXT:    [[VAL0:%.*]] = load i32, i32* [[PTR:%.*]], align 4, !invariant.load !0
+; USE_ASSUME-NEXT:    [[VAL0:%.*]] = load i32, ptr [[PTR:%.*]], align 4, !invariant.load !0
 ; USE_ASSUME-NEXT:    call void @clobber_and_use(i32 [[VAL0]])
 ; USE_ASSUME-NEXT:    br i1 [[COND:%.*]], label [[LEFT:%.*]], label [[RIGHT:%.*]]
 ; USE_ASSUME:       left:
-; USE_ASSUME-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[PTR]], i64 4), "nonnull"(i32* [[PTR]]), "align"(i32* [[PTR]], i64 4) ]
+; USE_ASSUME-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[PTR]], i64 4), "nonnull"(ptr [[PTR]]), "align"(ptr [[PTR]], i64 4) ]
 ; USE_ASSUME-NEXT:    call void @clobber_and_use(i32 [[VAL0]])
 ; USE_ASSUME-NEXT:    ret void
 ; USE_ASSUME:       right:
 ; USE_ASSUME-NEXT:    ret void
 ;
-  %val0 = load i32, i32* %ptr, !invariant.load !{}
+  %val0 = load i32, ptr %ptr, !invariant.load !{}
   call void @clobber_and_use(i32 %val0)
   br i1 %cond, label %left, label %right
 
 
 left:
-  %val1 = load i32, i32* %ptr
+  %val1 = load i32, ptr %ptr
   call void @clobber_and_use(i32 %val1)
   ret void
 
@@ -113,17 +113,17 @@ right:
   ret void
 }
 
-define void @f_4(i1 %cond, i32* %ptr) {
+define void @f_4(i1 %cond, ptr %ptr) {
 ; Negative test -- can't forward %val0 to %val1 because that'll break
 ; def-dominates-use.
 ; CHECK-LABEL: @f_4(
 ; CHECK-NEXT:    br i1 [[COND:%.*]], label [[LEFT:%.*]], label [[MERGE:%.*]]
 ; CHECK:       left:
-; CHECK-NEXT:    [[VAL0:%.*]] = load i32, i32* [[PTR:%.*]], align 4, !invariant.load !0
+; CHECK-NEXT:    [[VAL0:%.*]] = load i32, ptr [[PTR:%.*]], align 4, !invariant.load !0
 ; CHECK-NEXT:    call void @clobber_and_use(i32 [[VAL0]])
 ; CHECK-NEXT:    br label [[MERGE]]
 ; CHECK:       merge:
-; CHECK-NEXT:    [[VAL1:%.*]] = load i32, i32* [[PTR]], align 4
+; CHECK-NEXT:    [[VAL1:%.*]] = load i32, ptr [[PTR]], align 4
 ; CHECK-NEXT:    call void @clobber_and_use(i32 [[VAL1]])
 ; CHECK-NEXT:    ret void
 ;
@@ -131,13 +131,13 @@ define void @f_4(i1 %cond, i32* %ptr) {
 
 left:
 
-  %val0 = load i32, i32* %ptr, !invariant.load !{}
+  %val0 = load i32, ptr %ptr, !invariant.load !{}
   call void @clobber_and_use(i32 %val0)
   br label %merge
 
 merge:
 
-  %val1 = load i32, i32* %ptr
+  %val1 = load i32, ptr %ptr
   call void @clobber_and_use(i32 %val1)
   ret void
 }
@@ -146,61 +146,61 @@ merge:
 ; LangRef is a bit unclear about whether the store is reachable, so
 ; for the moment we chose to be conservative and just assume it's valid
 ; to restore the same unchanging value.
-define void @test_dse1(i32* %p) {
+define void @test_dse1(ptr %p) {
 ; NO_ASSUME-LABEL: @test_dse1(
-; NO_ASSUME-NEXT:    [[V1:%.*]] = load i32, i32* [[P:%.*]], align 4, !invariant.load !0
+; NO_ASSUME-NEXT:    [[V1:%.*]] = load i32, ptr [[P:%.*]], align 4, !invariant.load !0
 ; NO_ASSUME-NEXT:    call void @clobber_and_use(i32 [[V1]])
 ; NO_ASSUME-NEXT:    ret void
 ;
 ; USE_ASSUME-LABEL: @test_dse1(
-; USE_ASSUME-NEXT:    [[V1:%.*]] = load i32, i32* [[P:%.*]], align 4, !invariant.load !0
+; USE_ASSUME-NEXT:    [[V1:%.*]] = load i32, ptr [[P:%.*]], align 4, !invariant.load !0
 ; USE_ASSUME-NEXT:    call void @clobber_and_use(i32 [[V1]])
-; USE_ASSUME-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P]], i64 4), "nonnull"(i32* [[P]]), "align"(i32* [[P]], i64 4) ]
+; USE_ASSUME-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[P]], i64 4), "nonnull"(ptr [[P]]), "align"(ptr [[P]], i64 4) ]
 ; USE_ASSUME-NEXT:    ret void
 ;
-  %v1 = load i32, i32* %p, !invariant.load !{}
+  %v1 = load i32, ptr %p, !invariant.load !{}
   call void @clobber_and_use(i32 %v1)
-  store i32 %v1, i32* %p
+  store i32 %v1, ptr %p
   ret void
 }
 
 ; By assumption, v1 must equal v2 (TODO)
-define void @test_false_negative_dse2(i32* %p, i32 %v2) {
+define void @test_false_negative_dse2(ptr %p, i32 %v2) {
 ; CHECK-LABEL: @test_false_negative_dse2(
-; CHECK-NEXT:    [[V1:%.*]] = load i32, i32* [[P:%.*]], align 4, !invariant.load !0
+; CHECK-NEXT:    [[V1:%.*]] = load i32, ptr [[P:%.*]], align 4, !invariant.load !0
 ; CHECK-NEXT:    call void @clobber_and_use(i32 [[V1]])
-; CHECK-NEXT:    store i32 [[V2:%.*]], i32* [[P]], align 4
+; CHECK-NEXT:    store i32 [[V2:%.*]], ptr [[P]], align 4
 ; CHECK-NEXT:    ret void
 ;
-  %v1 = load i32, i32* %p, !invariant.load !{}
+  %v1 = load i32, ptr %p, !invariant.load !{}
   call void @clobber_and_use(i32 %v1)
-  store i32 %v2, i32* %p
+  store i32 %v2, ptr %p
   ret void
 }
 
 ; If we remove the load, we still start an invariant scope since
 ; it lets us remove later loads not explicitly marked invariant
-define void @test_scope_start_without_load(i32* %p) {
+define void @test_scope_start_without_load(ptr %p) {
 ; NO_ASSUME-LABEL: @test_scope_start_without_load(
-; NO_ASSUME-NEXT:    [[V1:%.*]] = load i32, i32* [[P:%.*]], align 4
+; NO_ASSUME-NEXT:    [[V1:%.*]] = load i32, ptr [[P:%.*]], align 4
 ; NO_ASSUME-NEXT:    [[ADD:%.*]] = add i32 [[V1]], [[V1]]
 ; NO_ASSUME-NEXT:    call void @clobber_and_use(i32 [[ADD]])
 ; NO_ASSUME-NEXT:    call void @clobber_and_use(i32 [[V1]])
 ; NO_ASSUME-NEXT:    ret void
 ;
 ; USE_ASSUME-LABEL: @test_scope_start_without_load(
-; USE_ASSUME-NEXT:    [[V1:%.*]] = load i32, i32* [[P:%.*]], align 4
-; USE_ASSUME-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P]], i64 4), "nonnull"(i32* [[P]]), "align"(i32* [[P]], i64 4) ]
+; USE_ASSUME-NEXT:    [[V1:%.*]] = load i32, ptr [[P:%.*]], align 4
+; USE_ASSUME-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[P]], i64 4), "nonnull"(ptr [[P]]), "align"(ptr [[P]], i64 4) ]
 ; USE_ASSUME-NEXT:    [[ADD:%.*]] = add i32 [[V1]], [[V1]]
 ; USE_ASSUME-NEXT:    call void @clobber_and_use(i32 [[ADD]])
 ; USE_ASSUME-NEXT:    call void @clobber_and_use(i32 [[V1]])
 ; USE_ASSUME-NEXT:    ret void
 ;
-  %v1 = load i32, i32* %p
-  %v2 = load i32, i32* %p, !invariant.load !{}
+  %v1 = load i32, ptr %p
+  %v2 = load i32, ptr %p, !invariant.load !{}
   %add = add i32 %v1, %v2
   call void @clobber_and_use(i32 %add)
-  %v3 = load i32, i32* %p
+  %v3 = load i32, ptr %p
   call void @clobber_and_use(i32 %v3)
   ret void
 }
@@ -208,9 +208,9 @@ define void @test_scope_start_without_load(i32* %p) {
 ; If we already have an invariant scope, we don't want to start a new one
 ; with a potentially greater generation.  This hides the earlier invariant
 ; load
-define void @test_scope_restart(i32* %p) {
+define void @test_scope_restart(ptr %p) {
 ; NO_ASSUME-LABEL: @test_scope_restart(
-; NO_ASSUME-NEXT:    [[V1:%.*]] = load i32, i32* [[P:%.*]], align 4, !invariant.load !0
+; NO_ASSUME-NEXT:    [[V1:%.*]] = load i32, ptr [[P:%.*]], align 4, !invariant.load !0
 ; NO_ASSUME-NEXT:    call void @clobber_and_use(i32 [[V1]])
 ; NO_ASSUME-NEXT:    [[ADD:%.*]] = add i32 [[V1]], [[V1]]
 ; NO_ASSUME-NEXT:    call void @clobber_and_use(i32 [[ADD]])
@@ -218,20 +218,20 @@ define void @test_scope_restart(i32* %p) {
 ; NO_ASSUME-NEXT:    ret void
 ;
 ; USE_ASSUME-LABEL: @test_scope_restart(
-; USE_ASSUME-NEXT:    [[V1:%.*]] = load i32, i32* [[P:%.*]], align 4, !invariant.load !0
+; USE_ASSUME-NEXT:    [[V1:%.*]] = load i32, ptr [[P:%.*]], align 4, !invariant.load !0
 ; USE_ASSUME-NEXT:    call void @clobber_and_use(i32 [[V1]])
-; USE_ASSUME-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P]], i64 4), "nonnull"(i32* [[P]]), "align"(i32* [[P]], i64 4) ]
+; USE_ASSUME-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[P]], i64 4), "nonnull"(ptr [[P]]), "align"(ptr [[P]], i64 4) ]
 ; USE_ASSUME-NEXT:    [[ADD:%.*]] = add i32 [[V1]], [[V1]]
 ; USE_ASSUME-NEXT:    call void @clobber_and_use(i32 [[ADD]])
 ; USE_ASSUME-NEXT:    call void @clobber_and_use(i32 [[V1]])
 ; USE_ASSUME-NEXT:    ret void
 ;
-  %v1 = load i32, i32* %p, !invariant.load !{}
+  %v1 = load i32, ptr %p, !invariant.load !{}
   call void @clobber_and_use(i32 %v1)
-  %v2 = load i32, i32* %p, !invariant.load !{}
+  %v2 = load i32, ptr %p, !invariant.load !{}
   %add = add i32 %v1, %v2
   call void @clobber_and_use(i32 %add)
-  %v3 = load i32, i32* %p
+  %v3 = load i32, ptr %p
   call void @clobber_and_use(i32 %v3)
   ret void
 }

diff  --git a/llvm/test/Transforms/EarlyCSE/invariant.start.ll b/llvm/test/Transforms/EarlyCSE/invariant.start.ll
index 79eadf5ff3fb5..dcf80f1402ee1 100644
--- a/llvm/test/Transforms/EarlyCSE/invariant.start.ll
+++ b/llvm/test/Transforms/EarlyCSE/invariant.start.ll
@@ -3,306 +3,304 @@
 ; RUN: opt < %s -S -early-cse --enable-knowledge-retention | FileCheck %s --check-prefixes=CHECK,USE_ASSUME
 ; RUN: opt < %s -S -passes=early-cse | FileCheck %s --check-prefixes=CHECK,NO_ASSUME
 
-declare {}* @llvm.invariant.start.p0i8(i64, i8* nocapture) nounwind readonly
-declare void @llvm.invariant.end.p0i8({}*, i64, i8* nocapture) nounwind
+declare ptr @llvm.invariant.start.p0(i64, ptr nocapture) nounwind readonly
+declare void @llvm.invariant.end.p0(ptr, i64, ptr nocapture) nounwind
 
 ; Check that we do load-load forwarding over invariant.start, since it does not
 ; clobber memory
-define i8 @test_bypass1(i8 *%P) {
+define i8 @test_bypass1(ptr%P) {
 ; NO_ASSUME-LABEL: define {{[^@]+}}@test_bypass1
-; NO_ASSUME-SAME: (i8* [[P:%.*]])
-; NO_ASSUME-NEXT:    [[V1:%.*]] = load i8, i8* [[P]], align 1
-; NO_ASSUME-NEXT:    [[I:%.*]] = call {}* @llvm.invariant.start.p0i8(i64 1, i8* [[P]])
+; NO_ASSUME-SAME: (ptr [[P:%.*]])
+; NO_ASSUME-NEXT:    [[V1:%.*]] = load i8, ptr [[P]], align 1
+; NO_ASSUME-NEXT:    [[I:%.*]] = call ptr @llvm.invariant.start.p0(i64 1, ptr [[P]])
 ; NO_ASSUME-NEXT:    ret i8 0
 ;
 ; USE_ASSUME-LABEL: define {{[^@]+}}@test_bypass1
-; USE_ASSUME-SAME: (i8* [[P:%.*]])
-; USE_ASSUME-NEXT:    [[V1:%.*]] = load i8, i8* [[P]], align 1
-; USE_ASSUME-NEXT:    [[I:%.*]] = call {}* @llvm.invariant.start.p0i8(i64 1, i8* [[P]])
-; USE_ASSUME-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(i8* [[P]], i64 1), "nonnull"(i8* [[P]]) ]
+; USE_ASSUME-SAME: (ptr [[P:%.*]])
+; USE_ASSUME-NEXT:    [[V1:%.*]] = load i8, ptr [[P]], align 1
+; USE_ASSUME-NEXT:    [[I:%.*]] = call ptr @llvm.invariant.start.p0(i64 1, ptr [[P]])
+; USE_ASSUME-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[P]], i64 1), "nonnull"(ptr [[P]]) ]
 ; USE_ASSUME-NEXT:    ret i8 0
 ;
 
-  %V1 = load i8, i8* %P
-  %i = call {}* @llvm.invariant.start.p0i8(i64 1, i8* %P)
-  %V2 = load i8, i8* %P
+  %V1 = load i8, ptr %P
+  %i = call ptr @llvm.invariant.start.p0(i64 1, ptr %P)
+  %V2 = load i8, ptr %P
   %Diff = sub i8 %V1, %V2
   ret i8 %Diff
 }
 
 
 ; Trivial Store->load forwarding over invariant.start
-define i8 @test_bypass2(i8 *%P) {
+define i8 @test_bypass2(ptr%P) {
 ; NO_ASSUME-LABEL: define {{[^@]+}}@test_bypass2
-; NO_ASSUME-SAME: (i8* [[P:%.*]])
-; NO_ASSUME-NEXT:    store i8 42, i8* [[P]], align 1
-; NO_ASSUME-NEXT:    [[I:%.*]] = call {}* @llvm.invariant.start.p0i8(i64 1, i8* [[P]])
+; NO_ASSUME-SAME: (ptr [[P:%.*]])
+; NO_ASSUME-NEXT:    store i8 42, ptr [[P]], align 1
+; NO_ASSUME-NEXT:    [[I:%.*]] = call ptr @llvm.invariant.start.p0(i64 1, ptr [[P]])
 ; NO_ASSUME-NEXT:    ret i8 42
 ;
 ; USE_ASSUME-LABEL: define {{[^@]+}}@test_bypass2
-; USE_ASSUME-SAME: (i8* [[P:%.*]])
-; USE_ASSUME-NEXT:    store i8 42, i8* [[P]], align 1
-; USE_ASSUME-NEXT:    [[I:%.*]] = call {}* @llvm.invariant.start.p0i8(i64 1, i8* [[P]])
-; USE_ASSUME-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(i8* [[P]], i64 1), "nonnull"(i8* [[P]]) ]
+; USE_ASSUME-SAME: (ptr [[P:%.*]])
+; USE_ASSUME-NEXT:    store i8 42, ptr [[P]], align 1
+; USE_ASSUME-NEXT:    [[I:%.*]] = call ptr @llvm.invariant.start.p0(i64 1, ptr [[P]])
+; USE_ASSUME-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[P]], i64 1), "nonnull"(ptr [[P]]) ]
 ; USE_ASSUME-NEXT:    ret i8 42
 ;
 
-  store i8 42, i8* %P
-  %i = call {}* @llvm.invariant.start.p0i8(i64 1, i8* %P)
-  %V1 = load i8, i8* %P
+  store i8 42, ptr %P
+  %i = call ptr @llvm.invariant.start.p0(i64 1, ptr %P)
+  %V1 = load i8, ptr %P
   ret i8 %V1
 }
 
-define i8 @test_bypass_store_load(i8 *%P, i8 *%P2) {
+define i8 @test_bypass_store_load(ptr%P, ptr%P2) {
 ; NO_ASSUME-LABEL: define {{[^@]+}}@test_bypass_store_load
-; NO_ASSUME-SAME: (i8* [[P:%.*]], i8* [[P2:%.*]])
-; NO_ASSUME-NEXT:    store i8 42, i8* [[P]], align 1
-; NO_ASSUME-NEXT:    [[I:%.*]] = call {}* @llvm.invariant.start.p0i8(i64 1, i8* [[P]])
-; NO_ASSUME-NEXT:    store i8 0, i8* [[P2]], align 1
+; NO_ASSUME-SAME: (ptr [[P:%.*]], ptr [[P2:%.*]])
+; NO_ASSUME-NEXT:    store i8 42, ptr [[P]], align 1
+; NO_ASSUME-NEXT:    [[I:%.*]] = call ptr @llvm.invariant.start.p0(i64 1, ptr [[P]])
+; NO_ASSUME-NEXT:    store i8 0, ptr [[P2]], align 1
 ; NO_ASSUME-NEXT:    ret i8 42
 ;
 ; USE_ASSUME-LABEL: define {{[^@]+}}@test_bypass_store_load
-; USE_ASSUME-SAME: (i8* [[P:%.*]], i8* [[P2:%.*]])
-; USE_ASSUME-NEXT:    store i8 42, i8* [[P]], align 1
-; USE_ASSUME-NEXT:    [[I:%.*]] = call {}* @llvm.invariant.start.p0i8(i64 1, i8* [[P]])
-; USE_ASSUME-NEXT:    store i8 0, i8* [[P2]], align 1
-; USE_ASSUME-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(i8* [[P]], i64 1), "nonnull"(i8* [[P]]) ]
+; USE_ASSUME-SAME: (ptr [[P:%.*]], ptr [[P2:%.*]])
+; USE_ASSUME-NEXT:    store i8 42, ptr [[P]], align 1
+; USE_ASSUME-NEXT:    [[I:%.*]] = call ptr @llvm.invariant.start.p0(i64 1, ptr [[P]])
+; USE_ASSUME-NEXT:    store i8 0, ptr [[P2]], align 1
+; USE_ASSUME-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[P]], i64 1), "nonnull"(ptr [[P]]) ]
 ; USE_ASSUME-NEXT:    ret i8 42
 ;
 
-  store i8 42, i8* %P
-  %i = call {}* @llvm.invariant.start.p0i8(i64 1, i8* %P)
-  store i8 0, i8* %P2
-  %V1 = load i8, i8* %P
+  store i8 42, ptr %P
+  %i = call ptr @llvm.invariant.start.p0(i64 1, ptr %P)
+  store i8 0, ptr %P2
+  %V1 = load i8, ptr %P
   ret i8 %V1
 }
 
-define i8 @test_bypass_store_load_aatags_1(i8 *%P, i8 *%P2) {
+define i8 @test_bypass_store_load_aatags_1(ptr%P, ptr%P2) {
 ; NO_ASSUME-LABEL: define {{[^@]+}}@test_bypass_store_load_aatags_1
-; NO_ASSUME-SAME: (i8* [[P:%.*]], i8* [[P2:%.*]])
-; NO_ASSUME-NEXT:    store i8 42, i8* [[P]], align 1, !tbaa !0
-; NO_ASSUME-NEXT:    [[I:%.*]] = call {}* @llvm.invariant.start.p0i8(i64 1, i8* [[P]])
-; NO_ASSUME-NEXT:    store i8 0, i8* [[P2]], align 1
+; NO_ASSUME-SAME: (ptr [[P:%.*]], ptr [[P2:%.*]])
+; NO_ASSUME-NEXT:    store i8 42, ptr [[P]], align 1, !tbaa !0
+; NO_ASSUME-NEXT:    [[I:%.*]] = call ptr @llvm.invariant.start.p0(i64 1, ptr [[P]])
+; NO_ASSUME-NEXT:    store i8 0, ptr [[P2]], align 1
 ; NO_ASSUME-NEXT:    ret i8 42
 ;
 ; USE_ASSUME-LABEL: define {{[^@]+}}@test_bypass_store_load_aatags_1
-; USE_ASSUME-SAME: (i8* [[P:%.*]], i8* [[P2:%.*]])
-; USE_ASSUME-NEXT:    store i8 42, i8* [[P]], align 1, !tbaa !0
-; USE_ASSUME-NEXT:    [[I:%.*]] = call {}* @llvm.invariant.start.p0i8(i64 1, i8* [[P]])
-; USE_ASSUME-NEXT:    store i8 0, i8* [[P2]], align 1
-; USE_ASSUME-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(i8* [[P]], i64 1), "nonnull"(i8* [[P]]) ]
+; USE_ASSUME-SAME: (ptr [[P:%.*]], ptr [[P2:%.*]])
+; USE_ASSUME-NEXT:    store i8 42, ptr [[P]], align 1, !tbaa !0
+; USE_ASSUME-NEXT:    [[I:%.*]] = call ptr @llvm.invariant.start.p0(i64 1, ptr [[P]])
+; USE_ASSUME-NEXT:    store i8 0, ptr [[P2]], align 1
+; USE_ASSUME-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[P]], i64 1), "nonnull"(ptr [[P]]) ]
 ; USE_ASSUME-NEXT:    ret i8 42
 ;
 
-  store i8 42, i8* %P, !tbaa !0
-  %i = call {}* @llvm.invariant.start.p0i8(i64 1, i8* %P)
-  store i8 0, i8* %P2
-  %V1 = load i8, i8* %P
+  store i8 42, ptr %P, !tbaa !0
+  %i = call ptr @llvm.invariant.start.p0(i64 1, ptr %P)
+  store i8 0, ptr %P2
+  %V1 = load i8, ptr %P
   ret i8 %V1
 }
 
 ; The test demonstrates a missed optimization opportunity in case when the load
 ; has AA tags that are different from the store tags.
-define i8 @test_bypass_store_load_aatags_2(i8 *%P, i8 *%P2) {
+define i8 @test_bypass_store_load_aatags_2(ptr%P, ptr%P2) {
 ; NO_ASSUME-LABEL: define {{[^@]+}}@test_bypass_store_load_aatags_2
-; NO_ASSUME-SAME: (i8* [[P:%.*]], i8* [[P2:%.*]])
-; NO_ASSUME-NEXT:    store i8 42, i8* [[P]], align 1
-; NO_ASSUME-NEXT:    [[I:%.*]] = call {}* @llvm.invariant.start.p0i8(i64 1, i8* [[P]])
-; NO_ASSUME-NEXT:    store i8 0, i8* [[P2]], align 1
-; NO_ASSUME-NEXT:    %V1 = load i8, i8* %P, align 1, !tbaa !0
+; NO_ASSUME-SAME: (ptr [[P:%.*]], ptr [[P2:%.*]])
+; NO_ASSUME-NEXT:    store i8 42, ptr [[P]], align 1
+; NO_ASSUME-NEXT:    [[I:%.*]] = call ptr @llvm.invariant.start.p0(i64 1, ptr [[P]])
+; NO_ASSUME-NEXT:    store i8 0, ptr [[P2]], align 1
+; NO_ASSUME-NEXT:    %V1 = load i8, ptr %P, align 1, !tbaa !0
 ; NO_ASSUME-NEXT:    ret i8 %V1
 ;
 ; USE_ASSUME-LABEL: define {{[^@]+}}@test_bypass_store_load_aatags_2
-; USE_ASSUME-SAME: (i8* [[P:%.*]], i8* [[P2:%.*]])
-; USE_ASSUME-NEXT:    store i8 42, i8* [[P]], align 1
-; USE_ASSUME-NEXT:    [[I:%.*]] = call {}* @llvm.invariant.start.p0i8(i64 1, i8* [[P]])
-; USE_ASSUME-NEXT:    store i8 0, i8* [[P2]], align 1
-; USE_ASSUME-NEXT:    %V1 = load i8, i8* %P, align 1, !tbaa !0
+; USE_ASSUME-SAME: (ptr [[P:%.*]], ptr [[P2:%.*]])
+; USE_ASSUME-NEXT:    store i8 42, ptr [[P]], align 1
+; USE_ASSUME-NEXT:    [[I:%.*]] = call ptr @llvm.invariant.start.p0(i64 1, ptr [[P]])
+; USE_ASSUME-NEXT:    store i8 0, ptr [[P2]], align 1
+; USE_ASSUME-NEXT:    %V1 = load i8, ptr %P, align 1, !tbaa !0
 ; USE_ASSUME-NEXT:    ret i8 %V1
 ;
 
-  store i8 42, i8* %P
-  %i = call {}* @llvm.invariant.start.p0i8(i64 1, i8* %P)
-  store i8 0, i8* %P2
-  %V1 = load i8, i8* %P, !tbaa !0
+  store i8 42, ptr %P
+  %i = call ptr @llvm.invariant.start.p0(i64 1, ptr %P)
+  store i8 0, ptr %P2
+  %V1 = load i8, ptr %P, !tbaa !0
   ret i8 %V1
 }
 
 ; We can DSE over invariant.start calls, since the first store to
 ; %P is valid, and the second store is actually unreachable based on semantics
 ; of invariant.start.
-define void @test_bypass3(i8* %P) {
+define void @test_bypass3(ptr %P) {
 ; NO_ASSUME-LABEL: define {{[^@]+}}@test_bypass3
-; NO_ASSUME-SAME: (i8* [[P:%.*]])
-; NO_ASSUME-NEXT:    [[I:%.*]] = call {}* @llvm.invariant.start.p0i8(i64 1, i8* [[P]])
-; NO_ASSUME-NEXT:    store i8 60, i8* [[P]], align 1
+; NO_ASSUME-SAME: (ptr [[P:%.*]])
+; NO_ASSUME-NEXT:    [[I:%.*]] = call ptr @llvm.invariant.start.p0(i64 1, ptr [[P]])
+; NO_ASSUME-NEXT:    store i8 60, ptr [[P]], align 1
 ; NO_ASSUME-NEXT:    ret void
 ;
 ; USE_ASSUME-LABEL: define {{[^@]+}}@test_bypass3
-; USE_ASSUME-SAME: (i8* [[P:%.*]])
-; USE_ASSUME-NEXT:    [[I:%.*]] = call {}* @llvm.invariant.start.p0i8(i64 1, i8* [[P]])
-; USE_ASSUME-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(i8* [[P]], i64 1), "nonnull"(i8* [[P]]) ]
-; USE_ASSUME-NEXT:    store i8 60, i8* [[P]], align 1
+; USE_ASSUME-SAME: (ptr [[P:%.*]])
+; USE_ASSUME-NEXT:    [[I:%.*]] = call ptr @llvm.invariant.start.p0(i64 1, ptr [[P]])
+; USE_ASSUME-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[P]], i64 1), "nonnull"(ptr [[P]]) ]
+; USE_ASSUME-NEXT:    store i8 60, ptr [[P]], align 1
 ; USE_ASSUME-NEXT:    ret void
 ;
 
-  store i8 50, i8* %P
-  %i = call {}* @llvm.invariant.start.p0i8(i64 1, i8* %P)
-  store i8 60, i8* %P
+  store i8 50, ptr %P
+  %i = call ptr @llvm.invariant.start.p0(i64 1, ptr %P)
+  store i8 60, ptr %P
   ret void
 }
 
 
 ; FIXME: Now the first store can actually be eliminated, since there is no read within
 ; the invariant region, between start and end.
-define void @test_bypass4(i8* %P) {
+define void @test_bypass4(ptr %P) {
 ; CHECK-LABEL: define {{[^@]+}}@test_bypass4
-; CHECK-SAME: (i8* [[P:%.*]])
-; CHECK-NEXT:    store i8 50, i8* [[P]], align 1
-; CHECK-NEXT:    [[I:%.*]] = call {}* @llvm.invariant.start.p0i8(i64 1, i8* [[P]])
-; CHECK-NEXT:    call void @llvm.invariant.end.p0i8({}* [[I]], i64 1, i8* [[P]])
-; CHECK-NEXT:    store i8 60, i8* [[P]], align 1
+; CHECK-SAME: (ptr [[P:%.*]])
+; CHECK-NEXT:    store i8 50, ptr [[P]], align 1
+; CHECK-NEXT:    [[I:%.*]] = call ptr @llvm.invariant.start.p0(i64 1, ptr [[P]])
+; CHECK-NEXT:    call void @llvm.invariant.end.p0(ptr [[I]], i64 1, ptr [[P]])
+; CHECK-NEXT:    store i8 60, ptr [[P]], align 1
 ; CHECK-NEXT:    ret void
 ;
 
 
-  store i8 50, i8* %P
-  %i = call {}* @llvm.invariant.start.p0i8(i64 1, i8* %P)
-  call void @llvm.invariant.end.p0i8({}* %i, i64 1, i8* %P)
-  store i8 60, i8* %P
+  store i8 50, ptr %P
+  %i = call ptr @llvm.invariant.start.p0(i64 1, ptr %P)
+  call void @llvm.invariant.end.p0(ptr %i, i64 1, ptr %P)
+  store i8 60, ptr %P
   ret void
 }
 
 
 declare void @clobber()
-declare {}* @llvm.invariant.start.p0i32(i64 %size, i32* nocapture %ptr)
-declare void @llvm.invariant.end.p0i32({}*, i64, i32* nocapture) nounwind
 
-define i32 @test_before_load(i32* %p) {
+define i32 @test_before_load(ptr %p) {
 ; NO_ASSUME-LABEL: define {{[^@]+}}@test_before_load
-; NO_ASSUME-SAME: (i32* [[P:%.*]])
-; NO_ASSUME-NEXT:    [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]])
-; NO_ASSUME-NEXT:    [[V1:%.*]] = load i32, i32* [[P]], align 4
+; NO_ASSUME-SAME: (ptr [[P:%.*]])
+; NO_ASSUME-NEXT:    [[TMP1:%.*]] = call ptr @llvm.invariant.start.p0(i64 4, ptr [[P]])
+; NO_ASSUME-NEXT:    [[V1:%.*]] = load i32, ptr [[P]], align 4
 ; NO_ASSUME-NEXT:    call void @clobber()
 ; NO_ASSUME-NEXT:    ret i32 0
 ;
 ; USE_ASSUME-LABEL: define {{[^@]+}}@test_before_load
-; USE_ASSUME-SAME: (i32* [[P:%.*]])
-; USE_ASSUME-NEXT:    [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]])
-; USE_ASSUME-NEXT:    [[V1:%.*]] = load i32, i32* [[P]], align 4
+; USE_ASSUME-SAME: (ptr [[P:%.*]])
+; USE_ASSUME-NEXT:    [[TMP1:%.*]] = call ptr @llvm.invariant.start.p0(i64 4, ptr [[P]])
+; USE_ASSUME-NEXT:    [[V1:%.*]] = load i32, ptr [[P]], align 4
 ; USE_ASSUME-NEXT:    call void @clobber()
-; USE_ASSUME-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P]], i64 4), "nonnull"(i32* [[P]]), "align"(i32* [[P]], i64 4) ]
+; USE_ASSUME-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[P]], i64 4), "nonnull"(ptr [[P]]), "align"(ptr [[P]], i64 4) ]
 ; USE_ASSUME-NEXT:    ret i32 0
 ;
-  call {}* @llvm.invariant.start.p0i32(i64 4, i32* %p)
-  %v1 = load i32, i32* %p
+  call ptr @llvm.invariant.start.p0(i64 4, ptr %p)
+  %v1 = load i32, ptr %p
   call void @clobber()
-  %v2 = load i32, i32* %p
+  %v2 = load i32, ptr %p
   %sub = sub i32 %v1, %v2
   ret i32 %sub
 }
 
-define i32 @test_before_clobber(i32* %p) {
+define i32 @test_before_clobber(ptr %p) {
 ; NO_ASSUME-LABEL: define {{[^@]+}}@test_before_clobber
-; NO_ASSUME-SAME: (i32* [[P:%.*]])
-; NO_ASSUME-NEXT:    [[V1:%.*]] = load i32, i32* [[P]], align 4
-; NO_ASSUME-NEXT:    [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]])
+; NO_ASSUME-SAME: (ptr [[P:%.*]])
+; NO_ASSUME-NEXT:    [[V1:%.*]] = load i32, ptr [[P]], align 4
+; NO_ASSUME-NEXT:    [[TMP1:%.*]] = call ptr @llvm.invariant.start.p0(i64 4, ptr [[P]])
 ; NO_ASSUME-NEXT:    call void @clobber()
 ; NO_ASSUME-NEXT:    ret i32 0
 ;
 ; USE_ASSUME-LABEL: define {{[^@]+}}@test_before_clobber
-; USE_ASSUME-SAME: (i32* [[P:%.*]])
-; USE_ASSUME-NEXT:    [[V1:%.*]] = load i32, i32* [[P]], align 4
-; USE_ASSUME-NEXT:    [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]])
+; USE_ASSUME-SAME: (ptr [[P:%.*]])
+; USE_ASSUME-NEXT:    [[V1:%.*]] = load i32, ptr [[P]], align 4
+; USE_ASSUME-NEXT:    [[TMP1:%.*]] = call ptr @llvm.invariant.start.p0(i64 4, ptr [[P]])
 ; USE_ASSUME-NEXT:    call void @clobber()
-; USE_ASSUME-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P]], i64 4), "nonnull"(i32* [[P]]), "align"(i32* [[P]], i64 4) ]
+; USE_ASSUME-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[P]], i64 4), "nonnull"(ptr [[P]]), "align"(ptr [[P]], i64 4) ]
 ; USE_ASSUME-NEXT:    ret i32 0
 ;
-  %v1 = load i32, i32* %p
-  call {}* @llvm.invariant.start.p0i32(i64 4, i32* %p)
+  %v1 = load i32, ptr %p
+  call ptr @llvm.invariant.start.p0(i64 4, ptr %p)
   call void @clobber()
-  %v2 = load i32, i32* %p
+  %v2 = load i32, ptr %p
   %sub = sub i32 %v1, %v2
   ret i32 %sub
 }
 
-define i32 @test_duplicate_scope(i32* %p) {
+define i32 @test_duplicate_scope(ptr %p) {
 ; NO_ASSUME-LABEL: define {{[^@]+}}@test_duplicate_scope
-; NO_ASSUME-SAME: (i32* [[P:%.*]])
-; NO_ASSUME-NEXT:    [[V1:%.*]] = load i32, i32* [[P]], align 4
-; NO_ASSUME-NEXT:    [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]])
+; NO_ASSUME-SAME: (ptr [[P:%.*]])
+; NO_ASSUME-NEXT:    [[V1:%.*]] = load i32, ptr [[P]], align 4
+; NO_ASSUME-NEXT:    [[TMP1:%.*]] = call ptr @llvm.invariant.start.p0(i64 4, ptr [[P]])
 ; NO_ASSUME-NEXT:    call void @clobber()
-; NO_ASSUME-NEXT:    [[TMP2:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]])
+; NO_ASSUME-NEXT:    [[TMP2:%.*]] = call ptr @llvm.invariant.start.p0(i64 4, ptr [[P]])
 ; NO_ASSUME-NEXT:    ret i32 0
 ;
 ; USE_ASSUME-LABEL: define {{[^@]+}}@test_duplicate_scope
-; USE_ASSUME-SAME: (i32* [[P:%.*]])
-; USE_ASSUME-NEXT:    [[V1:%.*]] = load i32, i32* [[P]], align 4
-; USE_ASSUME-NEXT:    [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]])
+; USE_ASSUME-SAME: (ptr [[P:%.*]])
+; USE_ASSUME-NEXT:    [[V1:%.*]] = load i32, ptr [[P]], align 4
+; USE_ASSUME-NEXT:    [[TMP1:%.*]] = call ptr @llvm.invariant.start.p0(i64 4, ptr [[P]])
 ; USE_ASSUME-NEXT:    call void @clobber()
-; USE_ASSUME-NEXT:    [[TMP2:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]])
-; USE_ASSUME-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P]], i64 4), "nonnull"(i32* [[P]]), "align"(i32* [[P]], i64 4) ]
+; USE_ASSUME-NEXT:    [[TMP2:%.*]] = call ptr @llvm.invariant.start.p0(i64 4, ptr [[P]])
+; USE_ASSUME-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[P]], i64 4), "nonnull"(ptr [[P]]), "align"(ptr [[P]], i64 4) ]
 ; USE_ASSUME-NEXT:    ret i32 0
 ;
-  %v1 = load i32, i32* %p
-  call {}* @llvm.invariant.start.p0i32(i64 4, i32* %p)
+  %v1 = load i32, ptr %p
+  call ptr @llvm.invariant.start.p0(i64 4, ptr %p)
   call void @clobber()
-  call {}* @llvm.invariant.start.p0i32(i64 4, i32* %p)
-  %v2 = load i32, i32* %p
+  call ptr @llvm.invariant.start.p0(i64 4, ptr %p)
+  %v2 = load i32, ptr %p
   %sub = sub i32 %v1, %v2
   ret i32 %sub
 }
 
-define i32 @test_unanalzyable_load(i32* %p) {
+define i32 @test_unanalzyable_load(ptr %p) {
 ; NO_ASSUME-LABEL: define {{[^@]+}}@test_unanalzyable_load
-; NO_ASSUME-SAME: (i32* [[P:%.*]])
-; NO_ASSUME-NEXT:    [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]])
+; NO_ASSUME-SAME: (ptr [[P:%.*]])
+; NO_ASSUME-NEXT:    [[TMP1:%.*]] = call ptr @llvm.invariant.start.p0(i64 4, ptr [[P]])
 ; NO_ASSUME-NEXT:    call void @clobber()
-; NO_ASSUME-NEXT:    [[V1:%.*]] = load i32, i32* [[P]], align 4
+; NO_ASSUME-NEXT:    [[V1:%.*]] = load i32, ptr [[P]], align 4
 ; NO_ASSUME-NEXT:    call void @clobber()
 ; NO_ASSUME-NEXT:    ret i32 0
 ;
 ; USE_ASSUME-LABEL: define {{[^@]+}}@test_unanalzyable_load
-; USE_ASSUME-SAME: (i32* [[P:%.*]])
-; USE_ASSUME-NEXT:    [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]])
+; USE_ASSUME-SAME: (ptr [[P:%.*]])
+; USE_ASSUME-NEXT:    [[TMP1:%.*]] = call ptr @llvm.invariant.start.p0(i64 4, ptr [[P]])
 ; USE_ASSUME-NEXT:    call void @clobber()
-; USE_ASSUME-NEXT:    [[V1:%.*]] = load i32, i32* [[P]], align 4
+; USE_ASSUME-NEXT:    [[V1:%.*]] = load i32, ptr [[P]], align 4
 ; USE_ASSUME-NEXT:    call void @clobber()
-; USE_ASSUME-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P]], i64 4), "nonnull"(i32* [[P]]), "align"(i32* [[P]], i64 4) ]
+; USE_ASSUME-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[P]], i64 4), "nonnull"(ptr [[P]]), "align"(ptr [[P]], i64 4) ]
 ; USE_ASSUME-NEXT:    ret i32 0
 ;
-  call {}* @llvm.invariant.start.p0i32(i64 4, i32* %p)
+  call ptr @llvm.invariant.start.p0(i64 4, ptr %p)
   call void @clobber()
-  %v1 = load i32, i32* %p
+  %v1 = load i32, ptr %p
   call void @clobber()
-  %v2 = load i32, i32* %p
+  %v2 = load i32, ptr %p
   %sub = sub i32 %v1, %v2
   ret i32 %sub
 }
 
-define i32 @test_negative_after_clobber(i32* %p) {
+define i32 @test_negative_after_clobber(ptr %p) {
 ; CHECK-LABEL: define {{[^@]+}}@test_negative_after_clobber
-; CHECK-SAME: (i32* [[P:%.*]])
-; CHECK-NEXT:    [[V1:%.*]] = load i32, i32* [[P]], align 4
+; CHECK-SAME: (ptr [[P:%.*]])
+; CHECK-NEXT:    [[V1:%.*]] = load i32, ptr [[P]], align 4
 ; CHECK-NEXT:    call void @clobber()
-; CHECK-NEXT:    [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]])
-; CHECK-NEXT:    [[V2:%.*]] = load i32, i32* [[P]], align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = call ptr @llvm.invariant.start.p0(i64 4, ptr [[P]])
+; CHECK-NEXT:    [[V2:%.*]] = load i32, ptr [[P]], align 4
 ; CHECK-NEXT:    [[SUB:%.*]] = sub i32 [[V1]], [[V2]]
 ; CHECK-NEXT:    ret i32 [[SUB]]
 ;
-  %v1 = load i32, i32* %p
+  %v1 = load i32, ptr %p
   call void @clobber()
-  call {}* @llvm.invariant.start.p0i32(i64 4, i32* %p)
-  %v2 = load i32, i32* %p
+  call ptr @llvm.invariant.start.p0(i64 4, ptr %p)
+  %v2 = load i32, ptr %p
   %sub = sub i32 %v1, %v2
   ret i32 %sub
 }
 
-define i32 @test_merge(i32* %p, i1 %cnd) {
+define i32 @test_merge(ptr %p, i1 %cnd) {
 ; NO_ASSUME-LABEL: define {{[^@]+}}@test_merge
-; NO_ASSUME-SAME: (i32* [[P:%.*]], i1 [[CND:%.*]])
-; NO_ASSUME-NEXT:    [[V1:%.*]] = load i32, i32* [[P]], align 4
-; NO_ASSUME-NEXT:    [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]])
+; NO_ASSUME-SAME: (ptr [[P:%.*]], i1 [[CND:%.*]])
+; NO_ASSUME-NEXT:    [[V1:%.*]] = load i32, ptr [[P]], align 4
+; NO_ASSUME-NEXT:    [[TMP1:%.*]] = call ptr @llvm.invariant.start.p0(i64 4, ptr [[P]])
 ; NO_ASSUME-NEXT:    br i1 [[CND]], label [[MERGE:%.*]], label [[TAKEN:%.*]]
 ; NO_ASSUME:       taken:
 ; NO_ASSUME-NEXT:    call void @clobber()
@@ -311,92 +309,92 @@ define i32 @test_merge(i32* %p, i1 %cnd) {
 ; NO_ASSUME-NEXT:    ret i32 0
 ;
 ; USE_ASSUME-LABEL: define {{[^@]+}}@test_merge
-; USE_ASSUME-SAME: (i32* [[P:%.*]], i1 [[CND:%.*]])
-; USE_ASSUME-NEXT:    [[V1:%.*]] = load i32, i32* [[P]], align 4
-; USE_ASSUME-NEXT:    [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]])
+; USE_ASSUME-SAME: (ptr [[P:%.*]], i1 [[CND:%.*]])
+; USE_ASSUME-NEXT:    [[V1:%.*]] = load i32, ptr [[P]], align 4
+; USE_ASSUME-NEXT:    [[TMP1:%.*]] = call ptr @llvm.invariant.start.p0(i64 4, ptr [[P]])
 ; USE_ASSUME-NEXT:    br i1 [[CND]], label [[MERGE:%.*]], label [[TAKEN:%.*]]
 ; USE_ASSUME:       taken:
 ; USE_ASSUME-NEXT:    call void @clobber()
 ; USE_ASSUME-NEXT:    br label [[MERGE]]
 ; USE_ASSUME:       merge:
-; USE_ASSUME-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P]], i64 4), "nonnull"(i32* [[P]]), "align"(i32* [[P]], i64 4) ]
+; USE_ASSUME-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[P]], i64 4), "nonnull"(ptr [[P]]), "align"(ptr [[P]], i64 4) ]
 ; USE_ASSUME-NEXT:    ret i32 0
 ;
-  %v1 = load i32, i32* %p
-  call {}* @llvm.invariant.start.p0i32(i64 4, i32* %p)
+  %v1 = load i32, ptr %p
+  call ptr @llvm.invariant.start.p0(i64 4, ptr %p)
   br i1 %cnd, label %merge, label %taken
 
 taken:
   call void @clobber()
   br label %merge
 merge:
-  %v2 = load i32, i32* %p
+  %v2 = load i32, ptr %p
   %sub = sub i32 %v1, %v2
   ret i32 %sub
 }
 
-define i32 @test_negative_after_mergeclobber(i32* %p, i1 %cnd) {
+define i32 @test_negative_after_mergeclobber(ptr %p, i1 %cnd) {
 ; CHECK-LABEL: define {{[^@]+}}@test_negative_after_mergeclobber
-; CHECK-SAME: (i32* [[P:%.*]], i1 [[CND:%.*]])
-; CHECK-NEXT:    [[V1:%.*]] = load i32, i32* [[P]], align 4
+; CHECK-SAME: (ptr [[P:%.*]], i1 [[CND:%.*]])
+; CHECK-NEXT:    [[V1:%.*]] = load i32, ptr [[P]], align 4
 ; CHECK-NEXT:    br i1 [[CND]], label [[MERGE:%.*]], label [[TAKEN:%.*]]
 ; CHECK:       taken:
 ; CHECK-NEXT:    call void @clobber()
 ; CHECK-NEXT:    br label [[MERGE]]
 ; CHECK:       merge:
-; CHECK-NEXT:    [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]])
-; CHECK-NEXT:    [[V2:%.*]] = load i32, i32* [[P]], align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = call ptr @llvm.invariant.start.p0(i64 4, ptr [[P]])
+; CHECK-NEXT:    [[V2:%.*]] = load i32, ptr [[P]], align 4
 ; CHECK-NEXT:    [[SUB:%.*]] = sub i32 [[V1]], [[V2]]
 ; CHECK-NEXT:    ret i32 [[SUB]]
 ;
-  %v1 = load i32, i32* %p
+  %v1 = load i32, ptr %p
   br i1 %cnd, label %merge, label %taken
 
 taken:
   call void @clobber()
   br label %merge
 merge:
-  call {}* @llvm.invariant.start.p0i32(i64 4, i32* %p)
-  %v2 = load i32, i32* %p
+  call ptr @llvm.invariant.start.p0(i64 4, ptr %p)
+  %v2 = load i32, ptr %p
   %sub = sub i32 %v1, %v2
   ret i32 %sub
 }
 
 ; In theory, this version could work, but earlycse is incapable of
 ; merging facts along distinct paths.
-define i32 @test_false_negative_merge(i32* %p, i1 %cnd) {
+define i32 @test_false_negative_merge(ptr %p, i1 %cnd) {
 ; CHECK-LABEL: define {{[^@]+}}@test_false_negative_merge
-; CHECK-SAME: (i32* [[P:%.*]], i1 [[CND:%.*]])
-; CHECK-NEXT:    [[V1:%.*]] = load i32, i32* [[P]], align 4
+; CHECK-SAME: (ptr [[P:%.*]], i1 [[CND:%.*]])
+; CHECK-NEXT:    [[V1:%.*]] = load i32, ptr [[P]], align 4
 ; CHECK-NEXT:    br i1 [[CND]], label [[MERGE:%.*]], label [[TAKEN:%.*]]
 ; CHECK:       taken:
-; CHECK-NEXT:    [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]])
+; CHECK-NEXT:    [[TMP1:%.*]] = call ptr @llvm.invariant.start.p0(i64 4, ptr [[P]])
 ; CHECK-NEXT:    call void @clobber()
 ; CHECK-NEXT:    br label [[MERGE]]
 ; CHECK:       merge:
-; CHECK-NEXT:    [[V2:%.*]] = load i32, i32* [[P]], align 4
+; CHECK-NEXT:    [[V2:%.*]] = load i32, ptr [[P]], align 4
 ; CHECK-NEXT:    [[SUB:%.*]] = sub i32 [[V1]], [[V2]]
 ; CHECK-NEXT:    ret i32 [[SUB]]
 ;
-  %v1 = load i32, i32* %p
+  %v1 = load i32, ptr %p
   br i1 %cnd, label %merge, label %taken
 
 taken:
-  call {}* @llvm.invariant.start.p0i32(i64 4, i32* %p)
+  call ptr @llvm.invariant.start.p0(i64 4, ptr %p)
   call void @clobber()
   br label %merge
 merge:
-  %v2 = load i32, i32* %p
+  %v2 = load i32, ptr %p
   %sub = sub i32 %v1, %v2
   ret i32 %sub
 }
 
-define i32 @test_merge_unanalyzable_load(i32* %p, i1 %cnd) {
+define i32 @test_merge_unanalyzable_load(ptr %p, i1 %cnd) {
 ; NO_ASSUME-LABEL: define {{[^@]+}}@test_merge_unanalyzable_load
-; NO_ASSUME-SAME: (i32* [[P:%.*]], i1 [[CND:%.*]])
-; NO_ASSUME-NEXT:    [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]])
+; NO_ASSUME-SAME: (ptr [[P:%.*]], i1 [[CND:%.*]])
+; NO_ASSUME-NEXT:    [[TMP1:%.*]] = call ptr @llvm.invariant.start.p0(i64 4, ptr [[P]])
 ; NO_ASSUME-NEXT:    call void @clobber()
-; NO_ASSUME-NEXT:    [[V1:%.*]] = load i32, i32* [[P]], align 4
+; NO_ASSUME-NEXT:    [[V1:%.*]] = load i32, ptr [[P]], align 4
 ; NO_ASSUME-NEXT:    br i1 [[CND]], label [[MERGE:%.*]], label [[TAKEN:%.*]]
 ; NO_ASSUME:       taken:
 ; NO_ASSUME-NEXT:    call void @clobber()
@@ -405,75 +403,75 @@ define i32 @test_merge_unanalyzable_load(i32* %p, i1 %cnd) {
 ; NO_ASSUME-NEXT:    ret i32 0
 ;
 ; USE_ASSUME-LABEL: define {{[^@]+}}@test_merge_unanalyzable_load
-; USE_ASSUME-SAME: (i32* [[P:%.*]], i1 [[CND:%.*]])
-; USE_ASSUME-NEXT:    [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]])
+; USE_ASSUME-SAME: (ptr [[P:%.*]], i1 [[CND:%.*]])
+; USE_ASSUME-NEXT:    [[TMP1:%.*]] = call ptr @llvm.invariant.start.p0(i64 4, ptr [[P]])
 ; USE_ASSUME-NEXT:    call void @clobber()
-; USE_ASSUME-NEXT:    [[V1:%.*]] = load i32, i32* [[P]], align 4
+; USE_ASSUME-NEXT:    [[V1:%.*]] = load i32, ptr [[P]], align 4
 ; USE_ASSUME-NEXT:    br i1 [[CND]], label [[MERGE:%.*]], label [[TAKEN:%.*]]
 ; USE_ASSUME:       taken:
 ; USE_ASSUME-NEXT:    call void @clobber()
 ; USE_ASSUME-NEXT:    br label [[MERGE]]
 ; USE_ASSUME:       merge:
-; USE_ASSUME-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P]], i64 4), "nonnull"(i32* [[P]]), "align"(i32* [[P]], i64 4) ]
+; USE_ASSUME-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[P]], i64 4), "nonnull"(ptr [[P]]), "align"(ptr [[P]], i64 4) ]
 ; USE_ASSUME-NEXT:    ret i32 0
 ;
-  call {}* @llvm.invariant.start.p0i32(i64 4, i32* %p)
+  call ptr @llvm.invariant.start.p0(i64 4, ptr %p)
   call void @clobber()
-  %v1 = load i32, i32* %p
+  %v1 = load i32, ptr %p
   br i1 %cnd, label %merge, label %taken
 
 taken:
   call void @clobber()
   br label %merge
 merge:
-  %v2 = load i32, i32* %p
+  %v2 = load i32, ptr %p
   %sub = sub i32 %v1, %v2
   ret i32 %sub
 }
 
-define void @test_dse_before_load(i32* %p, i1 %cnd) {
+define void @test_dse_before_load(ptr %p, i1 %cnd) {
 ; NO_ASSUME-LABEL: define {{[^@]+}}@test_dse_before_load
-; NO_ASSUME-SAME: (i32* [[P:%.*]], i1 [[CND:%.*]])
-; NO_ASSUME-NEXT:    [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]])
-; NO_ASSUME-NEXT:    [[V1:%.*]] = load i32, i32* [[P]], align 4
+; NO_ASSUME-SAME: (ptr [[P:%.*]], i1 [[CND:%.*]])
+; NO_ASSUME-NEXT:    [[TMP1:%.*]] = call ptr @llvm.invariant.start.p0(i64 4, ptr [[P]])
+; NO_ASSUME-NEXT:    [[V1:%.*]] = load i32, ptr [[P]], align 4
 ; NO_ASSUME-NEXT:    call void @clobber()
 ; NO_ASSUME-NEXT:    ret void
 ;
 ; USE_ASSUME-LABEL: define {{[^@]+}}@test_dse_before_load
-; USE_ASSUME-SAME: (i32* [[P:%.*]], i1 [[CND:%.*]])
-; USE_ASSUME-NEXT:    [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]])
-; USE_ASSUME-NEXT:    [[V1:%.*]] = load i32, i32* [[P]], align 4
+; USE_ASSUME-SAME: (ptr [[P:%.*]], i1 [[CND:%.*]])
+; USE_ASSUME-NEXT:    [[TMP1:%.*]] = call ptr @llvm.invariant.start.p0(i64 4, ptr [[P]])
+; USE_ASSUME-NEXT:    [[V1:%.*]] = load i32, ptr [[P]], align 4
 ; USE_ASSUME-NEXT:    call void @clobber()
-; USE_ASSUME-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P]], i64 4), "nonnull"(i32* [[P]]), "align"(i32* [[P]], i64 4) ]
+; USE_ASSUME-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[P]], i64 4), "nonnull"(ptr [[P]]), "align"(ptr [[P]], i64 4) ]
 ; USE_ASSUME-NEXT:    ret void
 ;
-  call {}* @llvm.invariant.start.p0i32(i64 4, i32* %p)
-  %v1 = load i32, i32* %p
+  call ptr @llvm.invariant.start.p0(i64 4, ptr %p)
+  %v1 = load i32, ptr %p
   call void @clobber()
-  store i32 %v1, i32* %p
+  store i32 %v1, ptr %p
   ret void
 }
 
-define void @test_dse_after_load(i32* %p, i1 %cnd) {
+define void @test_dse_after_load(ptr %p, i1 %cnd) {
 ; NO_ASSUME-LABEL: define {{[^@]+}}@test_dse_after_load
-; NO_ASSUME-SAME: (i32* [[P:%.*]], i1 [[CND:%.*]])
-; NO_ASSUME-NEXT:    [[V1:%.*]] = load i32, i32* [[P]], align 4
-; NO_ASSUME-NEXT:    [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]])
+; NO_ASSUME-SAME: (ptr [[P:%.*]], i1 [[CND:%.*]])
+; NO_ASSUME-NEXT:    [[V1:%.*]] = load i32, ptr [[P]], align 4
+; NO_ASSUME-NEXT:    [[TMP1:%.*]] = call ptr @llvm.invariant.start.p0(i64 4, ptr [[P]])
 ; NO_ASSUME-NEXT:    call void @clobber()
 ; NO_ASSUME-NEXT:    ret void
 ;
 ; USE_ASSUME-LABEL: define {{[^@]+}}@test_dse_after_load
-; USE_ASSUME-SAME: (i32* [[P:%.*]], i1 [[CND:%.*]])
-; USE_ASSUME-NEXT:    [[V1:%.*]] = load i32, i32* [[P]], align 4
-; USE_ASSUME-NEXT:    [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]])
+; USE_ASSUME-SAME: (ptr [[P:%.*]], i1 [[CND:%.*]])
+; USE_ASSUME-NEXT:    [[V1:%.*]] = load i32, ptr [[P]], align 4
+; USE_ASSUME-NEXT:    [[TMP1:%.*]] = call ptr @llvm.invariant.start.p0(i64 4, ptr [[P]])
 ; USE_ASSUME-NEXT:    call void @clobber()
-; USE_ASSUME-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P]], i64 4), "nonnull"(i32* [[P]]), "align"(i32* [[P]], i64 4) ]
+; USE_ASSUME-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[P]], i64 4), "nonnull"(ptr [[P]]), "align"(ptr [[P]], i64 4) ]
 ; USE_ASSUME-NEXT:    ret void
 ;
-  %v1 = load i32, i32* %p
-  call {}* @llvm.invariant.start.p0i32(i64 4, i32* %p)
+  %v1 = load i32, ptr %p
+  call ptr @llvm.invariant.start.p0(i64 4, ptr %p)
   call void @clobber()
-  store i32 %v1, i32* %p
+  store i32 %v1, ptr %p
   ret void
 }
 
@@ -481,122 +479,120 @@ define void @test_dse_after_load(i32* %p, i1 %cnd) {
 ; In this case, we have a false negative since MemoryLocation is implicitly
 ; typed due to the use of a Value to represent the address.  Note that other
 ; passes will canonicalize away the bitcasts in this example.
-define i32 @test_false_negative_types(i32* %p) {
+define i32 @test_false_negative_types(ptr %p) {
 ; CHECK-LABEL: define {{[^@]+}}@test_false_negative_types
-; CHECK-SAME: (i32* [[P:%.*]])
-; CHECK-NEXT:    [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]])
-; CHECK-NEXT:    [[V1:%.*]] = load i32, i32* [[P]], align 4
+; CHECK-SAME: (ptr [[P:%.*]])
+; CHECK-NEXT:    [[TMP1:%.*]] = call ptr @llvm.invariant.start.p0(i64 4, ptr [[P]])
+; CHECK-NEXT:    [[V1:%.*]] = load i32, ptr [[P]], align 4
 ; CHECK-NEXT:    call void @clobber()
-; CHECK-NEXT:    [[PF:%.*]] = bitcast i32* [[P]] to float*
-; CHECK-NEXT:    [[V2F:%.*]] = load float, float* [[PF]], align 4
+; CHECK-NEXT:    [[V2F:%.*]] = load float, ptr [[P]], align 4
 ; CHECK-NEXT:    [[V2:%.*]] = bitcast float [[V2F]] to i32
 ; CHECK-NEXT:    [[SUB:%.*]] = sub i32 [[V1]], [[V2]]
 ; CHECK-NEXT:    ret i32 [[SUB]]
 ;
-  call {}* @llvm.invariant.start.p0i32(i64 4, i32* %p)
-  %v1 = load i32, i32* %p
+  call ptr @llvm.invariant.start.p0(i64 4, ptr %p)
+  %v1 = load i32, ptr %p
   call void @clobber()
-  %pf = bitcast i32* %p to float*
-  %v2f = load float, float* %pf
+  %v2f = load float, ptr %p
   %v2 = bitcast float %v2f to i32
   %sub = sub i32 %v1, %v2
   ret i32 %sub
 }
 
-define i32 @test_negative_size1(i32* %p) {
+define i32 @test_negative_size1(ptr %p) {
 ; CHECK-LABEL: define {{[^@]+}}@test_negative_size1
-; CHECK-SAME: (i32* [[P:%.*]])
-; CHECK-NEXT:    [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 3, i32* [[P]])
-; CHECK-NEXT:    [[V1:%.*]] = load i32, i32* [[P]], align 4
+; CHECK-SAME: (ptr [[P:%.*]])
+; CHECK-NEXT:    [[TMP1:%.*]] = call ptr @llvm.invariant.start.p0(i64 3, ptr [[P]])
+; CHECK-NEXT:    [[V1:%.*]] = load i32, ptr [[P]], align 4
 ; CHECK-NEXT:    call void @clobber()
-; CHECK-NEXT:    [[V2:%.*]] = load i32, i32* [[P]], align 4
+; CHECK-NEXT:    [[V2:%.*]] = load i32, ptr [[P]], align 4
 ; CHECK-NEXT:    [[SUB:%.*]] = sub i32 [[V1]], [[V2]]
 ; CHECK-NEXT:    ret i32 [[SUB]]
 ;
-  call {}* @llvm.invariant.start.p0i32(i64 3, i32* %p)
-  %v1 = load i32, i32* %p
+  call ptr @llvm.invariant.start.p0(i64 3, ptr %p)
+  %v1 = load i32, ptr %p
   call void @clobber()
-  %v2 = load i32, i32* %p
+  %v2 = load i32, ptr %p
   %sub = sub i32 %v1, %v2
   ret i32 %sub
 }
 
-define i32 @test_negative_size2(i32* %p) {
+define i32 @test_negative_size2(ptr %p) {
 ; CHECK-LABEL: define {{[^@]+}}@test_negative_size2
-; CHECK-SAME: (i32* [[P:%.*]])
-; CHECK-NEXT:    [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 0, i32* [[P]])
-; CHECK-NEXT:    [[V1:%.*]] = load i32, i32* [[P]], align 4
+; CHECK-SAME: (ptr [[P:%.*]])
+; CHECK-NEXT:    [[TMP1:%.*]] = call ptr @llvm.invariant.start.p0(i64 0, ptr [[P]])
+; CHECK-NEXT:    [[V1:%.*]] = load i32, ptr [[P]], align 4
 ; CHECK-NEXT:    call void @clobber()
-; CHECK-NEXT:    [[V2:%.*]] = load i32, i32* [[P]], align 4
+; CHECK-NEXT:    [[V2:%.*]] = load i32, ptr [[P]], align 4
 ; CHECK-NEXT:    [[SUB:%.*]] = sub i32 [[V1]], [[V2]]
 ; CHECK-NEXT:    ret i32 [[SUB]]
 ;
-  call {}* @llvm.invariant.start.p0i32(i64 0, i32* %p)
-  %v1 = load i32, i32* %p
+  call ptr @llvm.invariant.start.p0(i64 0, ptr %p)
+  %v1 = load i32, ptr %p
   call void @clobber()
-  %v2 = load i32, i32* %p
+  %v2 = load i32, ptr %p
   %sub = sub i32 %v1, %v2
   ret i32 %sub
 }
 
-define i32 @test_negative_scope(i32* %p) {
+define i32 @test_negative_scope(ptr %p) {
 ; CHECK-LABEL: define {{[^@]+}}@test_negative_scope
-; CHECK-SAME: (i32* [[P:%.*]])
-; CHECK-NEXT:    [[SCOPE:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]])
-; CHECK-NEXT:    call void @llvm.invariant.end.p0i32({}* [[SCOPE]], i64 4, i32* [[P]])
-; CHECK-NEXT:    [[V1:%.*]] = load i32, i32* [[P]], align 4
+; CHECK-SAME: (ptr [[P:%.*]])
+; CHECK-NEXT:    [[SCOPE:%.*]] = call ptr @llvm.invariant.start.p0(i64 4, ptr [[P]])
+; CHECK-NEXT:    call void @llvm.invariant.end.p0(ptr [[SCOPE]], i64 4, ptr [[P]])
+; CHECK-NEXT:    [[V1:%.*]] = load i32, ptr [[P]], align 4
 ; CHECK-NEXT:    call void @clobber()
-; CHECK-NEXT:    [[V2:%.*]] = load i32, i32* [[P]], align 4
+; CHECK-NEXT:    [[V2:%.*]] = load i32, ptr [[P]], align 4
 ; CHECK-NEXT:    [[SUB:%.*]] = sub i32 [[V1]], [[V2]]
 ; CHECK-NEXT:    ret i32 [[SUB]]
 ;
-  %scope = call {}* @llvm.invariant.start.p0i32(i64 4, i32* %p)
-  call void @llvm.invariant.end.p0i32({}* %scope, i64 4, i32* %p)
-  %v1 = load i32, i32* %p
+  %scope = call ptr @llvm.invariant.start.p0(i64 4, ptr %p)
+  call void @llvm.invariant.end.p0(ptr %scope, i64 4, ptr %p)
+  %v1 = load i32, ptr %p
   call void @clobber()
-  %v2 = load i32, i32* %p
+  %v2 = load i32, ptr %p
   %sub = sub i32 %v1, %v2
   ret i32 %sub
 }
 
-define i32 @test_false_negative_scope(i32* %p) {
+define i32 @test_false_negative_scope(ptr %p) {
 ; CHECK-LABEL: define {{[^@]+}}@test_false_negative_scope
-; CHECK-SAME: (i32* [[P:%.*]])
-; CHECK-NEXT:    [[SCOPE:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]])
-; CHECK-NEXT:    [[V1:%.*]] = load i32, i32* [[P]], align 4
+; CHECK-SAME: (ptr [[P:%.*]])
+; CHECK-NEXT:    [[SCOPE:%.*]] = call ptr @llvm.invariant.start.p0(i64 4, ptr [[P]])
+; CHECK-NEXT:    [[V1:%.*]] = load i32, ptr [[P]], align 4
 ; CHECK-NEXT:    call void @clobber()
-; CHECK-NEXT:    [[V2:%.*]] = load i32, i32* [[P]], align 4
-; CHECK-NEXT:    call void @llvm.invariant.end.p0i32({}* [[SCOPE]], i64 4, i32* [[P]])
+; CHECK-NEXT:    [[V2:%.*]] = load i32, ptr [[P]], align 4
+; CHECK-NEXT:    call void @llvm.invariant.end.p0(ptr [[SCOPE]], i64 4, ptr [[P]])
 ; CHECK-NEXT:    [[SUB:%.*]] = sub i32 [[V1]], [[V2]]
 ; CHECK-NEXT:    ret i32 [[SUB]]
 ;
-  %scope = call {}* @llvm.invariant.start.p0i32(i64 4, i32* %p)
-  %v1 = load i32, i32* %p
+  %scope = call ptr @llvm.invariant.start.p0(i64 4, ptr %p)
+  %v1 = load i32, ptr %p
   call void @clobber()
-  %v2 = load i32, i32* %p
-  call void @llvm.invariant.end.p0i32({}* %scope, i64 4, i32* %p)
+  %v2 = load i32, ptr %p
+  call void @llvm.invariant.end.p0(ptr %scope, i64 4, ptr %p)
   %sub = sub i32 %v1, %v2
   ret i32 %sub
 }
 
 ; Invariant load de facto starts an invariant.start scope of the appropriate size
-define i32 @test_invariant_load_scope(i32* %p) {
+define i32 @test_invariant_load_scope(ptr %p) {
 ; NO_ASSUME-LABEL: define {{[^@]+}}@test_invariant_load_scope
-; NO_ASSUME-SAME: (i32* [[P:%.*]])
-; NO_ASSUME-NEXT:    [[V1:%.*]] = load i32, i32* [[P]], align 4, !invariant.load !4
+; NO_ASSUME-SAME: (ptr [[P:%.*]])
+; NO_ASSUME-NEXT:    [[V1:%.*]] = load i32, ptr [[P]], align 4, !invariant.load !4
 ; NO_ASSUME-NEXT:    call void @clobber()
 ; NO_ASSUME-NEXT:    ret i32 0
 ;
 ; USE_ASSUME-LABEL: define {{[^@]+}}@test_invariant_load_scope
-; USE_ASSUME-SAME: (i32* [[P:%.*]])
-; USE_ASSUME-NEXT:    [[V1:%.*]] = load i32, i32* [[P]], align 4, !invariant.load !4
+; USE_ASSUME-SAME: (ptr [[P:%.*]])
+; USE_ASSUME-NEXT:    [[V1:%.*]] = load i32, ptr [[P]], align 4, !invariant.load !4
 ; USE_ASSUME-NEXT:    call void @clobber()
-; USE_ASSUME-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P]], i64 4), "nonnull"(i32* [[P]]), "align"(i32* [[P]], i64 4) ]
+; USE_ASSUME-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[P]], i64 4), "nonnull"(ptr [[P]]), "align"(ptr [[P]], i64 4) ]
 ; USE_ASSUME-NEXT:    ret i32 0
 ;
-  %v1 = load i32, i32* %p, !invariant.load !{}
+  %v1 = load i32, ptr %p, !invariant.load !{}
   call void @clobber()
-  %v2 = load i32, i32* %p
+  %v2 = load i32, ptr %p
   %sub = sub i32 %v1, %v2
   ret i32 %sub
 }

diff  --git a/llvm/test/Transforms/EarlyCSE/masked-intrinsics-unequal-masks.ll b/llvm/test/Transforms/EarlyCSE/masked-intrinsics-unequal-masks.ll
index cf5641d855514..9357f99ebb5f6 100644
--- a/llvm/test/Transforms/EarlyCSE/masked-intrinsics-unequal-masks.ll
+++ b/llvm/test/Transforms/EarlyCSE/masked-intrinsics-unequal-masks.ll
@@ -10,44 +10,44 @@
 
 ; Load-load, second mask is a submask of the first, second thru is undef.
 ; Expect the second load to be removed.
-define <4 x i32> @f3(<4 x i32>* %a0, <4 x i32> %a1) {
+define <4 x i32> @f3(ptr %a0, <4 x i32> %a1) {
 ; CHECK-LABEL: @f3(
-; CHECK-NEXT:    [[V0:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* [[A0:%.*]], i32 4, <4 x i1> <i1 true, i1 true, i1 false, i1 true>, <4 x i32> [[A1:%.*]])
+; CHECK-NEXT:    [[V0:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[A0:%.*]], i32 4, <4 x i1> <i1 true, i1 true, i1 false, i1 true>, <4 x i32> [[A1:%.*]])
 ; CHECK-NEXT:    [[V2:%.*]] = add <4 x i32> [[V0]], [[V0]]
 ; CHECK-NEXT:    ret <4 x i32> [[V2]]
 ;
-  %v0 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %a0, i32 4, <4 x i1> <i1 true, i1 true, i1 false, i1 true>, <4 x i32> %a1)
-  %v1 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %a0, i32 4, <4 x i1> <i1 true, i1 false, i1 false, i1 true>, <4 x i32> undef)
+  %v0 = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %a0, i32 4, <4 x i1> <i1 true, i1 true, i1 false, i1 true>, <4 x i32> %a1)
+  %v1 = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %a0, i32 4, <4 x i1> <i1 true, i1 false, i1 false, i1 true>, <4 x i32> undef)
   %v2 = add <4 x i32> %v0, %v1
   ret <4 x i32> %v2
 }
 
 ; Load-load, second mask is a submask of the first, second thru is not undef.
 ; Expect the second load to remain.
-define <4 x i32> @f4(<4 x i32>* %a0, <4 x i32> %a1) {
+define <4 x i32> @f4(ptr %a0, <4 x i32> %a1) {
 ; CHECK-LABEL: @f4(
-; CHECK-NEXT:    [[V0:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* [[A0:%.*]], i32 4, <4 x i1> <i1 true, i1 true, i1 false, i1 true>, <4 x i32> [[A1:%.*]])
-; CHECK-NEXT:    [[V1:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* [[A0]], i32 4, <4 x i1> <i1 true, i1 false, i1 false, i1 true>, <4 x i32> zeroinitializer)
+; CHECK-NEXT:    [[V0:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[A0:%.*]], i32 4, <4 x i1> <i1 true, i1 true, i1 false, i1 true>, <4 x i32> [[A1:%.*]])
+; CHECK-NEXT:    [[V1:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[A0]], i32 4, <4 x i1> <i1 true, i1 false, i1 false, i1 true>, <4 x i32> zeroinitializer)
 ; CHECK-NEXT:    [[V2:%.*]] = add <4 x i32> [[V0]], [[V1]]
 ; CHECK-NEXT:    ret <4 x i32> [[V2]]
 ;
-  %v0 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %a0, i32 4, <4 x i1> <i1 true, i1 true, i1 false, i1 true>, <4 x i32> %a1)
-  %v1 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %a0, i32 4, <4 x i1> <i1 true, i1 false, i1 false, i1 true>, <4 x i32> zeroinitializer)
+  %v0 = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %a0, i32 4, <4 x i1> <i1 true, i1 true, i1 false, i1 true>, <4 x i32> %a1)
+  %v1 = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %a0, i32 4, <4 x i1> <i1 true, i1 false, i1 false, i1 true>, <4 x i32> zeroinitializer)
   %v2 = add <4 x i32> %v0, %v1
   ret <4 x i32> %v2
 }
 
 ; Load-load, second mask is not a submask of the first, second thru is undef.
 ; Expect the second load to remain.
-define <4 x i32> @f5(<4 x i32>* %a0, <4 x i32> %a1) {
+define <4 x i32> @f5(ptr %a0, <4 x i32> %a1) {
 ; CHECK-LABEL: @f5(
-; CHECK-NEXT:    [[V0:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* [[A0:%.*]], i32 4, <4 x i1> <i1 true, i1 true, i1 false, i1 true>, <4 x i32> [[A1:%.*]])
-; CHECK-NEXT:    [[V1:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* [[A0]], i32 4, <4 x i1> <i1 true, i1 false, i1 false, i1 true>, <4 x i32> zeroinitializer)
+; CHECK-NEXT:    [[V0:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[A0:%.*]], i32 4, <4 x i1> <i1 true, i1 true, i1 false, i1 true>, <4 x i32> [[A1:%.*]])
+; CHECK-NEXT:    [[V1:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[A0]], i32 4, <4 x i1> <i1 true, i1 false, i1 false, i1 true>, <4 x i32> zeroinitializer)
 ; CHECK-NEXT:    [[V2:%.*]] = add <4 x i32> [[V0]], [[V1]]
 ; CHECK-NEXT:    ret <4 x i32> [[V2]]
 ;
-  %v0 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %a0, i32 4, <4 x i1> <i1 true, i1 true, i1 false, i1 true>, <4 x i32> %a1)
-  %v1 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %a0, i32 4, <4 x i1> <i1 true, i1 false, i1 false, i1 true>, <4 x i32> zeroinitializer)
+  %v0 = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %a0, i32 4, <4 x i1> <i1 true, i1 true, i1 false, i1 true>, <4 x i32> %a1)
+  %v1 = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %a0, i32 4, <4 x i1> <i1 true, i1 false, i1 false, i1 true>, <4 x i32> zeroinitializer)
   %v2 = add <4 x i32> %v0, %v1
   ret <4 x i32> %v2
 }
@@ -57,26 +57,26 @@ define <4 x i32> @f5(<4 x i32>* %a0, <4 x i32> %a1) {
 
 ; Store-store, first mask is a submask of the second.
 ; Expect the first store to be removed.
-define void @f6(<4 x i32> %a0, <4 x i32>* %a1) {
+define void @f6(<4 x i32> %a0, ptr %a1) {
 ; CHECK-LABEL: @f6(
-; CHECK-NEXT:    call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> [[A0:%.*]], <4 x i32>* [[A1:%.*]], i32 4, <4 x i1> <i1 true, i1 true, i1 false, i1 true>)
+; CHECK-NEXT:    call void @llvm.masked.store.v4i32.p0(<4 x i32> [[A0:%.*]], ptr [[A1:%.*]], i32 4, <4 x i1> <i1 true, i1 true, i1 false, i1 true>)
 ; CHECK-NEXT:    ret void
 ;
-  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %a0, <4 x i32>* %a1, i32 4, <4 x i1> <i1 true, i1 false, i1 false, i1 true>)
-  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %a0, <4 x i32>* %a1, i32 4, <4 x i1> <i1 true, i1 true, i1 false, i1 true>)
+  call void @llvm.masked.store.v4i32.p0(<4 x i32> %a0, ptr %a1, i32 4, <4 x i1> <i1 true, i1 false, i1 false, i1 true>)
+  call void @llvm.masked.store.v4i32.p0(<4 x i32> %a0, ptr %a1, i32 4, <4 x i1> <i1 true, i1 true, i1 false, i1 true>)
   ret void
 }
 
 ; Store-store, first mask is not a submask of the second.
 ; Expect both stores to remain.
-define void @f7(<4 x i32> %a0, <4 x i32>* %a1) {
+define void @f7(<4 x i32> %a0, ptr %a1) {
 ; CHECK-LABEL: @f7(
-; CHECK-NEXT:    call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> [[A0:%.*]], <4 x i32>* [[A1:%.*]], i32 4, <4 x i1> <i1 true, i1 true, i1 false, i1 true>)
-; CHECK-NEXT:    call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> [[A0]], <4 x i32>* [[A1]], i32 4, <4 x i1> <i1 true, i1 false, i1 false, i1 true>)
+; CHECK-NEXT:    call void @llvm.masked.store.v4i32.p0(<4 x i32> [[A0:%.*]], ptr [[A1:%.*]], i32 4, <4 x i1> <i1 true, i1 true, i1 false, i1 true>)
+; CHECK-NEXT:    call void @llvm.masked.store.v4i32.p0(<4 x i32> [[A0]], ptr [[A1]], i32 4, <4 x i1> <i1 true, i1 false, i1 false, i1 true>)
 ; CHECK-NEXT:    ret void
 ;
-  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %a0, <4 x i32>* %a1, i32 4, <4 x i1> <i1 true, i1 true, i1 false, i1 true>)
-  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %a0, <4 x i32>* %a1, i32 4, <4 x i1> <i1 true, i1 false, i1 false, i1 true>)
+  call void @llvm.masked.store.v4i32.p0(<4 x i32> %a0, ptr %a1, i32 4, <4 x i1> <i1 true, i1 true, i1 false, i1 true>)
+  call void @llvm.masked.store.v4i32.p0(<4 x i32> %a0, ptr %a1, i32 4, <4 x i1> <i1 true, i1 false, i1 false, i1 true>)
   ret void
 }
 
@@ -85,26 +85,26 @@ define void @f7(<4 x i32> %a0, <4 x i32>* %a1) {
 
 ; Load-store, second mask is a submask of the first.
 ; Expect the store to be removed.
-define <4 x i32> @f8(<4 x i32>* %a0, <4 x i32> %a1) {
+define <4 x i32> @f8(ptr %a0, <4 x i32> %a1) {
 ; CHECK-LABEL: @f8(
-; CHECK-NEXT:    [[V0:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* [[A0:%.*]], i32 4, <4 x i1> <i1 true, i1 true, i1 false, i1 true>, <4 x i32> [[A1:%.*]])
+; CHECK-NEXT:    [[V0:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[A0:%.*]], i32 4, <4 x i1> <i1 true, i1 true, i1 false, i1 true>, <4 x i32> [[A1:%.*]])
 ; CHECK-NEXT:    ret <4 x i32> [[V0]]
 ;
-  %v0 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %a0, i32 4, <4 x i1> <i1 true, i1 true, i1 false, i1 true>, <4 x i32> %a1)
-  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %v0, <4 x i32>* %a0, i32 4, <4 x i1> <i1 true, i1 false, i1 false, i1 true>)
+  %v0 = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %a0, i32 4, <4 x i1> <i1 true, i1 true, i1 false, i1 true>, <4 x i32> %a1)
+  call void @llvm.masked.store.v4i32.p0(<4 x i32> %v0, ptr %a0, i32 4, <4 x i1> <i1 true, i1 false, i1 false, i1 true>)
   ret <4 x i32> %v0
 }
 
 ; Load-store, second mask is not a submask of the first.
 ; Expect the store to remain.
-define <4 x i32> @f9(<4 x i32>* %a0, <4 x i32> %a1) {
+define <4 x i32> @f9(ptr %a0, <4 x i32> %a1) {
 ; CHECK-LABEL: @f9(
-; CHECK-NEXT:    [[V0:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* [[A0:%.*]], i32 4, <4 x i1> <i1 true, i1 false, i1 false, i1 true>, <4 x i32> [[A1:%.*]])
-; CHECK-NEXT:    call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> [[V0]], <4 x i32>* [[A0]], i32 4, <4 x i1> <i1 true, i1 true, i1 false, i1 true>)
+; CHECK-NEXT:    [[V0:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[A0:%.*]], i32 4, <4 x i1> <i1 true, i1 false, i1 false, i1 true>, <4 x i32> [[A1:%.*]])
+; CHECK-NEXT:    call void @llvm.masked.store.v4i32.p0(<4 x i32> [[V0]], ptr [[A0]], i32 4, <4 x i1> <i1 true, i1 true, i1 false, i1 true>)
 ; CHECK-NEXT:    ret <4 x i32> [[V0]]
 ;
-  %v0 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %a0, i32 4, <4 x i1> <i1 true, i1 false, i1 false, i1 true>, <4 x i32> %a1)
-  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %v0, <4 x i32>* %a0, i32 4, <4 x i1> <i1 true, i1 true, i1 false, i1 true>)
+  %v0 = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %a0, i32 4, <4 x i1> <i1 true, i1 false, i1 false, i1 true>, <4 x i32> %a1)
+  call void @llvm.masked.store.v4i32.p0(<4 x i32> %v0, ptr %a0, i32 4, <4 x i1> <i1 true, i1 true, i1 false, i1 true>)
   ret <4 x i32> %v0
 }
 
@@ -113,41 +113,41 @@ define <4 x i32> @f9(<4 x i32>* %a0, <4 x i32> %a1) {
 
 ; Store-load, load's mask is a submask of store's mask, thru is undef.
 ; Expect the load to be removed.
-define <4 x i32> @fa(<4 x i32> %a0, <4 x i32>* %a1) {
+define <4 x i32> @fa(<4 x i32> %a0, ptr %a1) {
 ; CHECK-LABEL: @fa(
-; CHECK-NEXT:    call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> [[A0:%.*]], <4 x i32>* [[A1:%.*]], i32 4, <4 x i1> <i1 true, i1 true, i1 false, i1 true>)
+; CHECK-NEXT:    call void @llvm.masked.store.v4i32.p0(<4 x i32> [[A0:%.*]], ptr [[A1:%.*]], i32 4, <4 x i1> <i1 true, i1 true, i1 false, i1 true>)
 ; CHECK-NEXT:    ret <4 x i32> [[A0]]
 ;
-  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %a0, <4 x i32>* %a1, i32 4, <4 x i1> <i1 true, i1 true, i1 false, i1 true>)
-  %v0 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %a1, i32 4, <4 x i1> <i1 true, i1 false, i1 false, i1 true>, <4 x i32> undef)
+  call void @llvm.masked.store.v4i32.p0(<4 x i32> %a0, ptr %a1, i32 4, <4 x i1> <i1 true, i1 true, i1 false, i1 true>)
+  %v0 = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %a1, i32 4, <4 x i1> <i1 true, i1 false, i1 false, i1 true>, <4 x i32> undef)
   ret <4 x i32> %v0
 }
 
 ; Store-load, load's mask is a submask of store's mask, thru is not undef.
 ; Expect the load to remain.
-define <4 x i32> @fb(<4 x i32> %a0, <4 x i32>* %a1) {
+define <4 x i32> @fb(<4 x i32> %a0, ptr %a1) {
 ; CHECK-LABEL: @fb(
-; CHECK-NEXT:    call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> [[A0:%.*]], <4 x i32>* [[A1:%.*]], i32 4, <4 x i1> <i1 true, i1 true, i1 false, i1 true>)
-; CHECK-NEXT:    [[V0:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* [[A1]], i32 4, <4 x i1> <i1 true, i1 false, i1 false, i1 true>, <4 x i32> zeroinitializer)
+; CHECK-NEXT:    call void @llvm.masked.store.v4i32.p0(<4 x i32> [[A0:%.*]], ptr [[A1:%.*]], i32 4, <4 x i1> <i1 true, i1 true, i1 false, i1 true>)
+; CHECK-NEXT:    [[V0:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[A1]], i32 4, <4 x i1> <i1 true, i1 false, i1 false, i1 true>, <4 x i32> zeroinitializer)
 ; CHECK-NEXT:    ret <4 x i32> [[V0]]
 ;
-  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %a0, <4 x i32>* %a1, i32 4, <4 x i1> <i1 true, i1 true, i1 false, i1 true>)
-  %v0 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %a1, i32 4, <4 x i1> <i1 true, i1 false, i1 false, i1 true>, <4 x i32> zeroinitializer)
+  call void @llvm.masked.store.v4i32.p0(<4 x i32> %a0, ptr %a1, i32 4, <4 x i1> <i1 true, i1 true, i1 false, i1 true>)
+  %v0 = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %a1, i32 4, <4 x i1> <i1 true, i1 false, i1 false, i1 true>, <4 x i32> zeroinitializer)
   ret <4 x i32> %v0
 }
 
 ; Store-load, load's mask is not a submask of store's mask, thru is undef.
 ; Expect the load to remain.
-define <4 x i32> @fc(<4 x i32> %a0, <4 x i32>* %a1) {
+define <4 x i32> @fc(<4 x i32> %a0, ptr %a1) {
 ; CHECK-LABEL: @fc(
-; CHECK-NEXT:    call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> [[A0:%.*]], <4 x i32>* [[A1:%.*]], i32 4, <4 x i1> <i1 true, i1 false, i1 false, i1 true>)
-; CHECK-NEXT:    [[V0:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* [[A1]], i32 4, <4 x i1> <i1 true, i1 true, i1 false, i1 true>, <4 x i32> undef)
+; CHECK-NEXT:    call void @llvm.masked.store.v4i32.p0(<4 x i32> [[A0:%.*]], ptr [[A1:%.*]], i32 4, <4 x i1> <i1 true, i1 false, i1 false, i1 true>)
+; CHECK-NEXT:    [[V0:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[A1]], i32 4, <4 x i1> <i1 true, i1 true, i1 false, i1 true>, <4 x i32> undef)
 ; CHECK-NEXT:    ret <4 x i32> [[V0]]
 ;
-  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %a0, <4 x i32>* %a1, i32 4, <4 x i1> <i1 true, i1 false, i1 false, i1 true>)
-  %v0 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %a1, i32 4, <4 x i1> <i1 true, i1 true, i1 false, i1 true>, <4 x i32> undef)
+  call void @llvm.masked.store.v4i32.p0(<4 x i32> %a0, ptr %a1, i32 4, <4 x i1> <i1 true, i1 false, i1 false, i1 true>)
+  %v0 = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %a1, i32 4, <4 x i1> <i1 true, i1 true, i1 false, i1 true>, <4 x i32> undef)
   ret <4 x i32> %v0
 }
 
-declare <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>*, i32, <4 x i1>, <4 x i32>)
-declare void @llvm.masked.store.v4i32.p0v4i32(<4 x i32>, <4 x i32>*, i32, <4 x i1>)
+declare <4 x i32> @llvm.masked.load.v4i32.p0(ptr, i32, <4 x i1>, <4 x i32>)
+declare void @llvm.masked.store.v4i32.p0(<4 x i32>, ptr, i32, <4 x i1>)

diff  --git a/llvm/test/Transforms/EarlyCSE/masked-intrinsics.ll b/llvm/test/Transforms/EarlyCSE/masked-intrinsics.ll
index 392a487f627dd..aa3167ab5e3f8 100644
--- a/llvm/test/Transforms/EarlyCSE/masked-intrinsics.ll
+++ b/llvm/test/Transforms/EarlyCSE/masked-intrinsics.ll
@@ -1,43 +1,43 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt -S -early-cse < %s | FileCheck %s
 
-define <128 x i8> @f0(<128 x i8>* %a0, <128 x i8> %a1, <128 x i8> %a2) {
+define <128 x i8> @f0(ptr %a0, <128 x i8> %a1, <128 x i8> %a2) {
 ; CHECK-LABEL: @f0(
 ; CHECK-NEXT:    [[V0:%.*]] = icmp eq <128 x i8> [[A1:%.*]], [[A2:%.*]]
-; CHECK-NEXT:    call void @llvm.masked.store.v128i8.p0v128i8(<128 x i8> [[A1]], <128 x i8>* [[A0:%.*]], i32 4, <128 x i1> [[V0]])
+; CHECK-NEXT:    call void @llvm.masked.store.v128i8.p0(<128 x i8> [[A1]], ptr [[A0:%.*]], i32 4, <128 x i1> [[V0]])
 ; CHECK-NEXT:    ret <128 x i8> [[A1]]
 ;
   %v0 = icmp eq <128 x i8> %a1, %a2
-  call void @llvm.masked.store.v128i8.p0v128i8(<128 x i8> %a1, <128 x i8>* %a0, i32 4, <128 x i1> %v0)
-  %v1 = call <128 x i8> @llvm.masked.load.v128i8.p0v128i8(<128 x i8>* %a0, i32 4, <128 x i1> %v0, <128 x i8> undef)
+  call void @llvm.masked.store.v128i8.p0(<128 x i8> %a1, ptr %a0, i32 4, <128 x i1> %v0)
+  %v1 = call <128 x i8> @llvm.masked.load.v128i8.p0(ptr %a0, i32 4, <128 x i1> %v0, <128 x i8> undef)
   ret <128 x i8> %v1
 }
 
-define <128 x i8> @f1(<128 x i8>* %a0, <128 x i8> %a1, <128 x i8> %a2) {
+define <128 x i8> @f1(ptr %a0, <128 x i8> %a1, <128 x i8> %a2) {
 ; CHECK-LABEL: @f1(
 ; CHECK-NEXT:    [[V0:%.*]] = icmp eq <128 x i8> [[A1:%.*]], [[A2:%.*]]
-; CHECK-NEXT:    [[V1:%.*]] = call <128 x i8> @llvm.masked.load.v128i8.p0v128i8(<128 x i8>* [[A0:%.*]], i32 4, <128 x i1> [[V0]], <128 x i8> undef)
+; CHECK-NEXT:    [[V1:%.*]] = call <128 x i8> @llvm.masked.load.v128i8.p0(ptr [[A0:%.*]], i32 4, <128 x i1> [[V0]], <128 x i8> undef)
 ; CHECK-NEXT:    ret <128 x i8> [[V1]]
 ;
   %v0 = icmp eq <128 x i8> %a1, %a2
-  %v1 = call <128 x i8> @llvm.masked.load.v128i8.p0v128i8(<128 x i8>* %a0, i32 4, <128 x i1> %v0, <128 x i8> undef)
-  call void @llvm.masked.store.v128i8.p0v128i8(<128 x i8> %v1, <128 x i8>* %a0, i32 4, <128 x i1> %v0)
+  %v1 = call <128 x i8> @llvm.masked.load.v128i8.p0(ptr %a0, i32 4, <128 x i1> %v0, <128 x i8> undef)
+  call void @llvm.masked.store.v128i8.p0(<128 x i8> %v1, ptr %a0, i32 4, <128 x i1> %v0)
   ret <128 x i8> %v1
 }
 
-define <128 x i8> @f2(<128 x i8>* %a0, <128 x i8> %a1, <128 x i8> %a2) {
+define <128 x i8> @f2(ptr %a0, <128 x i8> %a1, <128 x i8> %a2) {
 ; CHECK-LABEL: @f2(
 ; CHECK-NEXT:    [[V0:%.*]] = icmp eq <128 x i8> [[A1:%.*]], [[A2:%.*]]
-; CHECK-NEXT:    [[V1:%.*]] = call <128 x i8> @llvm.masked.load.v128i8.p0v128i8(<128 x i8>* [[A0:%.*]], i32 4, <128 x i1> [[V0]], <128 x i8> undef)
+; CHECK-NEXT:    [[V1:%.*]] = call <128 x i8> @llvm.masked.load.v128i8.p0(ptr [[A0:%.*]], i32 4, <128 x i1> [[V0]], <128 x i8> undef)
 ; CHECK-NEXT:    [[V3:%.*]] = add <128 x i8> [[V1]], [[V1]]
 ; CHECK-NEXT:    ret <128 x i8> [[V3]]
 ;
   %v0 = icmp eq <128 x i8> %a1, %a2
-  %v1 = call <128 x i8> @llvm.masked.load.v128i8.p0v128i8(<128 x i8>* %a0, i32 4, <128 x i1> %v0, <128 x i8> undef)
-  %v2 = call <128 x i8> @llvm.masked.load.v128i8.p0v128i8(<128 x i8>* %a0, i32 4, <128 x i1> %v0, <128 x i8> undef)
+  %v1 = call <128 x i8> @llvm.masked.load.v128i8.p0(ptr %a0, i32 4, <128 x i1> %v0, <128 x i8> undef)
+  %v2 = call <128 x i8> @llvm.masked.load.v128i8.p0(ptr %a0, i32 4, <128 x i1> %v0, <128 x i8> undef)
   %v3 = add <128 x i8> %v1, %v2
   ret <128 x i8> %v3
 }
 
-declare <128 x i8> @llvm.masked.load.v128i8.p0v128i8(<128 x i8>*, i32, <128 x i1>, <128 x i8>)
-declare void @llvm.masked.store.v128i8.p0v128i8(<128 x i8>, <128 x i8>*, i32, <128 x i1>)
+declare <128 x i8> @llvm.masked.load.v128i8.p0(ptr, i32, <128 x i1>, <128 x i8>)
+declare void @llvm.masked.store.v128i8.p0(<128 x i8>, ptr, i32, <128 x i1>)

diff  --git a/llvm/test/Transforms/EarlyCSE/memoryssa.ll b/llvm/test/Transforms/EarlyCSE/memoryssa.ll
index 730e8104452dc..b9c0ecece5357 100644
--- a/llvm/test/Transforms/EarlyCSE/memoryssa.ll
+++ b/llvm/test/Transforms/EarlyCSE/memoryssa.ll
@@ -11,20 +11,20 @@
 ;; Simple load value numbering across non-clobbering store.
 define i32 @test1() {
 ; CHECK-NOMEMSSA-LABEL: @test1(
-; CHECK-NOMEMSSA-NEXT:    [[V1:%.*]] = load i32, i32* @G1, align 4
-; CHECK-NOMEMSSA-NEXT:    store i32 0, i32* @G2, align 4
-; CHECK-NOMEMSSA-NEXT:    [[V2:%.*]] = load i32, i32* @G1, align 4
+; CHECK-NOMEMSSA-NEXT:    [[V1:%.*]] = load i32, ptr @G1, align 4
+; CHECK-NOMEMSSA-NEXT:    store i32 0, ptr @G2, align 4
+; CHECK-NOMEMSSA-NEXT:    [[V2:%.*]] = load i32, ptr @G1, align 4
 ; CHECK-NOMEMSSA-NEXT:    [[DIFF:%.*]] = sub i32 [[V1]], [[V2]]
 ; CHECK-NOMEMSSA-NEXT:    ret i32 [[DIFF]]
 ;
 ; CHECK-LABEL: @test1(
-; CHECK-NEXT:    [[V1:%.*]] = load i32, i32* @G1, align 4
-; CHECK-NEXT:    store i32 0, i32* @G2, align 4
+; CHECK-NEXT:    [[V1:%.*]] = load i32, ptr @G1, align 4
+; CHECK-NEXT:    store i32 0, ptr @G2, align 4
 ; CHECK-NEXT:    ret i32 0
 ;
-  %V1 = load i32, i32* @G1
-  store i32 0, i32* @G2
-  %V2 = load i32, i32* @G1
+  %V1 = load i32, ptr @G1
+  store i32 0, ptr @G2
+  %V2 = load i32, ptr @G1
   %Diff = sub i32 %V1, %V2
   ret i32 %Diff
 }
@@ -33,181 +33,181 @@ define i32 @test1() {
 define void @test2() {
 ; CHECK-NOMEMSSA-LABEL: @test2(
 ; CHECK-NOMEMSSA-NEXT:  entry:
-; CHECK-NOMEMSSA-NEXT:    [[V1:%.*]] = load i32, i32* @G1, align 4
-; CHECK-NOMEMSSA-NEXT:    store i32 0, i32* @G2, align 4
-; CHECK-NOMEMSSA-NEXT:    store i32 [[V1]], i32* @G1, align 4
+; CHECK-NOMEMSSA-NEXT:    [[V1:%.*]] = load i32, ptr @G1, align 4
+; CHECK-NOMEMSSA-NEXT:    store i32 0, ptr @G2, align 4
+; CHECK-NOMEMSSA-NEXT:    store i32 [[V1]], ptr @G1, align 4
 ; CHECK-NOMEMSSA-NEXT:    ret void
 ;
 ; CHECK-LABEL: @test2(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[V1:%.*]] = load i32, i32* @G1, align 4
-; CHECK-NEXT:    store i32 0, i32* @G2, align 4
+; CHECK-NEXT:    [[V1:%.*]] = load i32, ptr @G1, align 4
+; CHECK-NEXT:    store i32 0, ptr @G2, align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  %V1 = load i32, i32* @G1
-  store i32 0, i32* @G2
-  store i32 %V1, i32* @G1
+  %V1 = load i32, ptr @G1
+  store i32 0, ptr @G2
+  store i32 %V1, ptr @G1
   ret void
 }
 
 ;; Check that memoryphi optimization happens during EarlyCSE, enabling
 ;; more load CSE opportunities.
-define void @test_memphiopt(i1 %c, i32* %p) {
+define void @test_memphiopt(i1 %c, ptr %p) {
 ; CHECK-NOMEMSSA-LABEL: @test_memphiopt(
 ; CHECK-NOMEMSSA-NEXT:  entry:
-; CHECK-NOMEMSSA-NEXT:    [[V1:%.*]] = load i32, i32* @G1, align 4
+; CHECK-NOMEMSSA-NEXT:    [[V1:%.*]] = load i32, ptr @G1, align 4
 ; CHECK-NOMEMSSA-NEXT:    br i1 [[C:%.*]], label [[THEN:%.*]], label [[END:%.*]]
 ; CHECK-NOMEMSSA:       then:
-; CHECK-NOMEMSSA-NEXT:    [[PV:%.*]] = load i32, i32* [[P:%.*]], align 4
+; CHECK-NOMEMSSA-NEXT:    [[PV:%.*]] = load i32, ptr [[P:%.*]], align 4
 ; CHECK-NOMEMSSA-NEXT:    br label [[END]]
 ; CHECK-NOMEMSSA:       end:
-; CHECK-NOMEMSSA-NEXT:    [[V2:%.*]] = load i32, i32* @G1, align 4
+; CHECK-NOMEMSSA-NEXT:    [[V2:%.*]] = load i32, ptr @G1, align 4
 ; CHECK-NOMEMSSA-NEXT:    [[SUM:%.*]] = add i32 [[V1]], [[V2]]
-; CHECK-NOMEMSSA-NEXT:    store i32 [[SUM]], i32* @G2, align 4
+; CHECK-NOMEMSSA-NEXT:    store i32 [[SUM]], ptr @G2, align 4
 ; CHECK-NOMEMSSA-NEXT:    ret void
 ;
 ; CHECK-LABEL: @test_memphiopt(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[V1:%.*]] = load i32, i32* @G1, align 4
+; CHECK-NEXT:    [[V1:%.*]] = load i32, ptr @G1, align 4
 ; CHECK-NEXT:    br i1 [[C:%.*]], label [[THEN:%.*]], label [[END:%.*]]
 ; CHECK:       then:
-; CHECK-NEXT:    [[PV:%.*]] = load i32, i32* [[P:%.*]], align 4
+; CHECK-NEXT:    [[PV:%.*]] = load i32, ptr [[P:%.*]], align 4
 ; CHECK-NEXT:    br label [[END]]
 ; CHECK:       end:
 ; CHECK-NEXT:    [[SUM:%.*]] = add i32 [[V1]], [[V1]]
-; CHECK-NEXT:    store i32 [[SUM]], i32* @G2, align 4
+; CHECK-NEXT:    store i32 [[SUM]], ptr @G2, align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  %v1 = load i32, i32* @G1
+  %v1 = load i32, ptr @G1
   br i1 %c, label %then, label %end
 
 then:
-  %pv = load i32, i32* %p
-  store i32 %pv, i32* %p
+  %pv = load i32, ptr %p
+  store i32 %pv, ptr %p
   br label %end
 
 end:
-  %v2 = load i32, i32* @G1
+  %v2 = load i32, ptr @G1
   %sum = add i32 %v1, %v2
-  store i32 %sum, i32* @G2
+  store i32 %sum, ptr @G2
   ret void
 }
 
 
 ;; Check that MemoryPhi optimization and MemoryUse re-optimization
 ;; happens during EarlyCSE, enabling more load CSE opportunities.
-define void @test_memphiopt2(i1 %c, i32* %p) {
+define void @test_memphiopt2(i1 %c, ptr %p) {
 ; CHECK-NOMEMSSA-LABEL: @test_memphiopt2(
 ; CHECK-NOMEMSSA-NEXT:  entry:
-; CHECK-NOMEMSSA-NEXT:    [[V1:%.*]] = load i32, i32* @G1, align 4
-; CHECK-NOMEMSSA-NEXT:    store i32 [[V1]], i32* @G2, align 4
+; CHECK-NOMEMSSA-NEXT:    [[V1:%.*]] = load i32, ptr @G1, align 4
+; CHECK-NOMEMSSA-NEXT:    store i32 [[V1]], ptr @G2, align 4
 ; CHECK-NOMEMSSA-NEXT:    br i1 [[C:%.*]], label [[THEN:%.*]], label [[END:%.*]]
 ; CHECK-NOMEMSSA:       then:
-; CHECK-NOMEMSSA-NEXT:    [[PV:%.*]] = load i32, i32* [[P:%.*]], align 4
+; CHECK-NOMEMSSA-NEXT:    [[PV:%.*]] = load i32, ptr [[P:%.*]], align 4
 ; CHECK-NOMEMSSA-NEXT:    br label [[END]]
 ; CHECK-NOMEMSSA:       end:
-; CHECK-NOMEMSSA-NEXT:    [[V2:%.*]] = load i32, i32* @G1, align 4
-; CHECK-NOMEMSSA-NEXT:    store i32 [[V2]], i32* @G3, align 4
+; CHECK-NOMEMSSA-NEXT:    [[V2:%.*]] = load i32, ptr @G1, align 4
+; CHECK-NOMEMSSA-NEXT:    store i32 [[V2]], ptr @G3, align 4
 ; CHECK-NOMEMSSA-NEXT:    ret void
 ;
 ; CHECK-LABEL: @test_memphiopt2(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[V1:%.*]] = load i32, i32* @G1, align 4
-; CHECK-NEXT:    store i32 [[V1]], i32* @G2, align 4
+; CHECK-NEXT:    [[V1:%.*]] = load i32, ptr @G1, align 4
+; CHECK-NEXT:    store i32 [[V1]], ptr @G2, align 4
 ; CHECK-NEXT:    br i1 [[C:%.*]], label [[THEN:%.*]], label [[END:%.*]]
 ; CHECK:       then:
-; CHECK-NEXT:    [[PV:%.*]] = load i32, i32* [[P:%.*]], align 4
+; CHECK-NEXT:    [[PV:%.*]] = load i32, ptr [[P:%.*]], align 4
 ; CHECK-NEXT:    br label [[END]]
 ; CHECK:       end:
-; CHECK-NEXT:    store i32 [[V1]], i32* @G3, align 4
+; CHECK-NEXT:    store i32 [[V1]], ptr @G3, align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  %v1 = load i32, i32* @G1
-  store i32 %v1, i32* @G2
+  %v1 = load i32, ptr @G1
+  store i32 %v1, ptr @G2
   br i1 %c, label %then, label %end
 
 then:
-  %pv = load i32, i32* %p
-  store i32 %pv, i32* %p
+  %pv = load i32, ptr %p
+  store i32 %pv, ptr %p
   br label %end
 
 end:
-  %v2 = load i32, i32* @G1
-  store i32 %v2, i32* @G3
+  %v2 = load i32, ptr @G1
+  store i32 %v2, ptr @G3
   ret void
 }
 
 ;; Check that we respect lifetime.start/lifetime.end intrinsics when deleting
 ;; stores that, without the lifetime calls, would be writebacks.
-define void @test_writeback_lifetimes(i32* %p) {
+define void @test_writeback_lifetimes(ptr %p) {
 ; CHECK-NOMEMSSA-LABEL: @test_writeback_lifetimes(
 ; CHECK-NOMEMSSA-NEXT:  entry:
-; CHECK-NOMEMSSA-NEXT:    [[Q:%.*]] = getelementptr i32, i32* [[P:%.*]], i64 1
-; CHECK-NOMEMSSA-NEXT:    [[PV:%.*]] = load i32, i32* [[P]], align 4
-; CHECK-NOMEMSSA-NEXT:    [[QV:%.*]] = load i32, i32* [[Q]], align 4
-; CHECK-NOMEMSSA-NEXT:    call void @llvm.lifetime.end.p0i32(i64 8, i32* [[P]])
-; CHECK-NOMEMSSA-NEXT:    call void @llvm.lifetime.start.p0i32(i64 8, i32* [[P]])
-; CHECK-NOMEMSSA-NEXT:    store i32 [[PV]], i32* [[P]], align 4
-; CHECK-NOMEMSSA-NEXT:    store i32 [[QV]], i32* [[Q]], align 4
+; CHECK-NOMEMSSA-NEXT:    [[Q:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 1
+; CHECK-NOMEMSSA-NEXT:    [[PV:%.*]] = load i32, ptr [[P]], align 4
+; CHECK-NOMEMSSA-NEXT:    [[QV:%.*]] = load i32, ptr [[Q]], align 4
+; CHECK-NOMEMSSA-NEXT:    call void @llvm.lifetime.end.p0(i64 8, ptr [[P]])
+; CHECK-NOMEMSSA-NEXT:    call void @llvm.lifetime.start.p0(i64 8, ptr [[P]])
+; CHECK-NOMEMSSA-NEXT:    store i32 [[PV]], ptr [[P]], align 4
+; CHECK-NOMEMSSA-NEXT:    store i32 [[QV]], ptr [[Q]], align 4
 ; CHECK-NOMEMSSA-NEXT:    ret void
 ;
 ; CHECK-LABEL: @test_writeback_lifetimes(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[Q:%.*]] = getelementptr i32, i32* [[P:%.*]], i64 1
-; CHECK-NEXT:    [[PV:%.*]] = load i32, i32* [[P]], align 4
-; CHECK-NEXT:    [[QV:%.*]] = load i32, i32* [[Q]], align 4
-; CHECK-NEXT:    call void @llvm.lifetime.end.p0i32(i64 8, i32* [[P]])
-; CHECK-NEXT:    call void @llvm.lifetime.start.p0i32(i64 8, i32* [[P]])
-; CHECK-NEXT:    store i32 [[PV]], i32* [[P]], align 4
-; CHECK-NEXT:    store i32 [[QV]], i32* [[Q]], align 4
+; CHECK-NEXT:    [[Q:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 1
+; CHECK-NEXT:    [[PV:%.*]] = load i32, ptr [[P]], align 4
+; CHECK-NEXT:    [[QV:%.*]] = load i32, ptr [[Q]], align 4
+; CHECK-NEXT:    call void @llvm.lifetime.end.p0(i64 8, ptr [[P]])
+; CHECK-NEXT:    call void @llvm.lifetime.start.p0(i64 8, ptr [[P]])
+; CHECK-NEXT:    store i32 [[PV]], ptr [[P]], align 4
+; CHECK-NEXT:    store i32 [[QV]], ptr [[Q]], align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  %q = getelementptr i32, i32* %p, i64 1
-  %pv = load i32, i32* %p
-  %qv = load i32, i32* %q
-  call void @llvm.lifetime.end.p0i8(i64 8, i32* %p)
-  call void @llvm.lifetime.start.p0i8(i64 8, i32* %p)
-  store i32 %pv, i32* %p
-  store i32 %qv, i32* %q
+  %q = getelementptr i32, ptr %p, i64 1
+  %pv = load i32, ptr %p
+  %qv = load i32, ptr %q
+  call void @llvm.lifetime.end.p0(i64 8, ptr %p)
+  call void @llvm.lifetime.start.p0(i64 8, ptr %p)
+  store i32 %pv, ptr %p
+  store i32 %qv, ptr %q
   ret void
 }
 
 ;; Check that we respect lifetime.start/lifetime.end intrinsics when deleting
 ;; stores that, without the lifetime calls, would be writebacks.
-define void @test_writeback_lifetimes_multi_arg(i32* %p, i32* %q) {
+define void @test_writeback_lifetimes_multi_arg(ptr %p, ptr %q) {
 ; CHECK-NOMEMSSA-LABEL: @test_writeback_lifetimes_multi_arg(
 ; CHECK-NOMEMSSA-NEXT:  entry:
-; CHECK-NOMEMSSA-NEXT:    [[PV:%.*]] = load i32, i32* [[P:%.*]], align 4
-; CHECK-NOMEMSSA-NEXT:    [[QV:%.*]] = load i32, i32* [[Q:%.*]], align 4
-; CHECK-NOMEMSSA-NEXT:    call void @llvm.lifetime.end.p0i32(i64 8, i32* [[P]])
-; CHECK-NOMEMSSA-NEXT:    call void @llvm.lifetime.start.p0i32(i64 8, i32* [[P]])
-; CHECK-NOMEMSSA-NEXT:    store i32 [[PV]], i32* [[P]], align 4
-; CHECK-NOMEMSSA-NEXT:    store i32 [[QV]], i32* [[Q]], align 4
+; CHECK-NOMEMSSA-NEXT:    [[PV:%.*]] = load i32, ptr [[P:%.*]], align 4
+; CHECK-NOMEMSSA-NEXT:    [[QV:%.*]] = load i32, ptr [[Q:%.*]], align 4
+; CHECK-NOMEMSSA-NEXT:    call void @llvm.lifetime.end.p0(i64 8, ptr [[P]])
+; CHECK-NOMEMSSA-NEXT:    call void @llvm.lifetime.start.p0(i64 8, ptr [[P]])
+; CHECK-NOMEMSSA-NEXT:    store i32 [[PV]], ptr [[P]], align 4
+; CHECK-NOMEMSSA-NEXT:    store i32 [[QV]], ptr [[Q]], align 4
 ; CHECK-NOMEMSSA-NEXT:    ret void
 ;
 ; CHECK-LABEL: @test_writeback_lifetimes_multi_arg(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[PV:%.*]] = load i32, i32* [[P:%.*]], align 4
-; CHECK-NEXT:    [[QV:%.*]] = load i32, i32* [[Q:%.*]], align 4
-; CHECK-NEXT:    call void @llvm.lifetime.end.p0i32(i64 8, i32* [[P]])
-; CHECK-NEXT:    call void @llvm.lifetime.start.p0i32(i64 8, i32* [[P]])
-; CHECK-NEXT:    store i32 [[PV]], i32* [[P]], align 4
-; CHECK-NEXT:    store i32 [[QV]], i32* [[Q]], align 4
+; CHECK-NEXT:    [[PV:%.*]] = load i32, ptr [[P:%.*]], align 4
+; CHECK-NEXT:    [[QV:%.*]] = load i32, ptr [[Q:%.*]], align 4
+; CHECK-NEXT:    call void @llvm.lifetime.end.p0(i64 8, ptr [[P]])
+; CHECK-NEXT:    call void @llvm.lifetime.start.p0(i64 8, ptr [[P]])
+; CHECK-NEXT:    store i32 [[PV]], ptr [[P]], align 4
+; CHECK-NEXT:    store i32 [[QV]], ptr [[Q]], align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  %pv = load i32, i32* %p
-  %qv = load i32, i32* %q
-  call void @llvm.lifetime.end.p0i8(i64 8, i32* %p)
-  call void @llvm.lifetime.start.p0i8(i64 8, i32* %p)
-  store i32 %pv, i32* %p
-  store i32 %qv, i32* %q
+  %pv = load i32, ptr %p
+  %qv = load i32, ptr %q
+  call void @llvm.lifetime.end.p0(i64 8, ptr %p)
+  call void @llvm.lifetime.start.p0(i64 8, ptr %p)
+  store i32 %pv, ptr %p
+  store i32 %qv, ptr %q
   ret void
 }
 
-declare void @llvm.lifetime.end.p0i8(i64, i32*)
-declare void @llvm.lifetime.start.p0i8(i64, i32*)
+declare void @llvm.lifetime.end.p0(i64, ptr)
+declare void @llvm.lifetime.start.p0(i64, ptr)

diff  --git a/llvm/test/Transforms/EarlyCSE/noalias-scope-decl.ll b/llvm/test/Transforms/EarlyCSE/noalias-scope-decl.ll
index 6b441c27aad61..2b51ba06e7706 100644
--- a/llvm/test/Transforms/EarlyCSE/noalias-scope-decl.ll
+++ b/llvm/test/Transforms/EarlyCSE/noalias-scope-decl.ll
@@ -3,30 +3,30 @@
 
 ; Store-to-load forwarding across a @llvm.experimental.noalias.scope.decl.
 
-define float @s2l(float* %p) {
+define float @s2l(ptr %p) {
 ; CHECK-LABEL: @s2l(
-; CHECK-NEXT:    store float 0.000000e+00, float* [[P:%.*]], align 4
+; CHECK-NEXT:    store float 0.000000e+00, ptr [[P:%.*]], align 4
 ; CHECK-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata !0)
 ; CHECK-NEXT:    ret float 0.000000e+00
 ;
-  store float 0.0, float* %p
+  store float 0.0, ptr %p
   call void @llvm.experimental.noalias.scope.decl(metadata !0)
-  %t = load float, float* %p
+  %t = load float, ptr %p
   ret float %t
 }
 
 ; Redundant load elimination across a @llvm.experimental.noalias.scope.decl.
 
-define float @rle(float* %p) {
+define float @rle(ptr %p) {
 ; CHECK-LABEL: @rle(
-; CHECK-NEXT:    [[R:%.*]] = load float, float* [[P:%.*]], align 4
+; CHECK-NEXT:    [[R:%.*]] = load float, ptr [[P:%.*]], align 4
 ; CHECK-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata !0)
 ; CHECK-NEXT:    [[T:%.*]] = fadd float [[R]], [[R]]
 ; CHECK-NEXT:    ret float [[T]]
 ;
-  %r = load float, float* %p
+  %r = load float, ptr %p
   call void @llvm.experimental.noalias.scope.decl(metadata !0)
-  %s = load float, float* %p
+  %s = load float, ptr %p
   %t = fadd float %r, %s
   ret float %t
 }

diff  --git a/llvm/test/Transforms/EarlyCSE/phi.ll b/llvm/test/Transforms/EarlyCSE/phi.ll
index e9c86ec5d3740..3ad865d0c9cb5 100644
--- a/llvm/test/Transforms/EarlyCSE/phi.ll
+++ b/llvm/test/Transforms/EarlyCSE/phi.ll
@@ -3,7 +3,7 @@
 ; RUN: opt -basic-aa -early-cse-memssa -S < %s | FileCheck %s
 
 ; Most basic case, fully identical PHI nodes
-define void @test0(i32 %v0, i32 %v1, i1 %c, i32* %d0, i32* %d1) {
+define void @test0(i32 %v0, i32 %v1, i1 %c, ptr %d0, ptr %d1) {
 ; CHECK-LABEL: @test0(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    br i1 [[C:%.*]], label [[B0:%.*]], label [[B1:%.*]]
@@ -14,8 +14,8 @@ define void @test0(i32 %v0, i32 %v1, i1 %c, i32* %d0, i32* %d1) {
 ; CHECK:       end:
 ; CHECK-NEXT:    [[I0:%.*]] = phi i32 [ [[V0:%.*]], [[B0]] ], [ [[V1:%.*]], [[B1]] ]
 ; CHECK-NEXT:    [[I1:%.*]] = phi i32 [ [[V0]], [[B0]] ], [ [[V1]], [[B1]] ]
-; CHECK-NEXT:    store i32 [[I0]], i32* [[D0:%.*]], align 4
-; CHECK-NEXT:    store i32 [[I1]], i32* [[D1:%.*]], align 4
+; CHECK-NEXT:    store i32 [[I0]], ptr [[D0:%.*]], align 4
+; CHECK-NEXT:    store i32 [[I1]], ptr [[D1:%.*]], align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:
@@ -30,13 +30,13 @@ b1:
 end:
   %i0 = phi i32 [ %v0, %b0 ], [ %v1, %b1 ]
   %i1 = phi i32 [ %v0, %b0 ], [ %v1, %b1 ]
-  store i32 %i0, i32* %d0
-  store i32 %i1, i32* %d1
+  store i32 %i0, ptr %d0
+  store i32 %i1, ptr %d1
   ret void
 }
 
 ; Fully identical PHI nodes, but order of operands differs
-define void @test1(i32 %v0, i32 %v1, i1 %c, i32* %d0, i32* %d1) {
+define void @test1(i32 %v0, i32 %v1, i1 %c, ptr %d0, ptr %d1) {
 ; CHECK-LABEL: @test1(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    br i1 [[C:%.*]], label [[B0:%.*]], label [[B1:%.*]]
@@ -47,8 +47,8 @@ define void @test1(i32 %v0, i32 %v1, i1 %c, i32* %d0, i32* %d1) {
 ; CHECK:       end:
 ; CHECK-NEXT:    [[I0:%.*]] = phi i32 [ [[V0:%.*]], [[B0]] ], [ [[V1:%.*]], [[B1]] ]
 ; CHECK-NEXT:    [[I1:%.*]] = phi i32 [ [[V1]], [[B1]] ], [ [[V0]], [[B0]] ]
-; CHECK-NEXT:    store i32 [[I0]], i32* [[D0:%.*]], align 4
-; CHECK-NEXT:    store i32 [[I1]], i32* [[D1:%.*]], align 4
+; CHECK-NEXT:    store i32 [[I0]], ptr [[D0:%.*]], align 4
+; CHECK-NEXT:    store i32 [[I1]], ptr [[D1:%.*]], align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:
@@ -63,13 +63,13 @@ b1:
 end:
   %i0 = phi i32 [ %v0, %b0 ], [ %v1, %b1 ]
   %i1 = phi i32 [ %v1, %b1 ], [ %v0, %b0 ]
-  store i32 %i0, i32* %d0
-  store i32 %i1, i32* %d1
+  store i32 %i0, ptr %d0
+  store i32 %i1, ptr %d1
   ret void
 }
 
 ; Different incoming values in second PHI
-define void @negative_test2(i32 %v0, i32 %v1, i32 %v2, i1 %c, i32* %d0, i32* %d1) {
+define void @negative_test2(i32 %v0, i32 %v1, i32 %v2, i1 %c, ptr %d0, ptr %d1) {
 ; CHECK-LABEL: @negative_test2(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    br i1 [[C:%.*]], label [[B0:%.*]], label [[B1:%.*]]
@@ -80,8 +80,8 @@ define void @negative_test2(i32 %v0, i32 %v1, i32 %v2, i1 %c, i32* %d0, i32* %d1
 ; CHECK:       end:
 ; CHECK-NEXT:    [[I0:%.*]] = phi i32 [ [[V0:%.*]], [[B0]] ], [ [[V1:%.*]], [[B1]] ]
 ; CHECK-NEXT:    [[I1:%.*]] = phi i32 [ [[V0]], [[B0]] ], [ [[V2:%.*]], [[B1]] ]
-; CHECK-NEXT:    store i32 [[I0]], i32* [[D0:%.*]], align 4
-; CHECK-NEXT:    store i32 [[I1]], i32* [[D1:%.*]], align 4
+; CHECK-NEXT:    store i32 [[I0]], ptr [[D0:%.*]], align 4
+; CHECK-NEXT:    store i32 [[I1]], ptr [[D1:%.*]], align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:
@@ -96,11 +96,11 @@ b1:
 end:
   %i0 = phi i32 [ %v0, %b0 ], [ %v1, %b1 ]
   %i1 = phi i32 [ %v0, %b0 ], [ %v2, %b1 ] ; from %b0 takes %v2 instead of %v1
-  store i32 %i0, i32* %d0
-  store i32 %i1, i32* %d1
+  store i32 %i0, ptr %d0
+  store i32 %i1, ptr %d1
   ret void
 }
-define void @negative_test3(i32 %v0, i32 %v1, i32 %v2, i1 %c, i32* %d0, i32* %d1) {
+define void @negative_test3(i32 %v0, i32 %v1, i32 %v2, i1 %c, ptr %d0, ptr %d1) {
 ; CHECK-LABEL: @negative_test3(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    br i1 [[C:%.*]], label [[B0:%.*]], label [[B1:%.*]]
@@ -111,8 +111,8 @@ define void @negative_test3(i32 %v0, i32 %v1, i32 %v2, i1 %c, i32* %d0, i32* %d1
 ; CHECK:       end:
 ; CHECK-NEXT:    [[I0:%.*]] = phi i32 [ [[V0:%.*]], [[B0]] ], [ [[V1:%.*]], [[B1]] ]
 ; CHECK-NEXT:    [[I1:%.*]] = phi i32 [ [[V2:%.*]], [[B1]] ], [ [[V0]], [[B0]] ]
-; CHECK-NEXT:    store i32 [[I0]], i32* [[D0:%.*]], align 4
-; CHECK-NEXT:    store i32 [[I1]], i32* [[D1:%.*]], align 4
+; CHECK-NEXT:    store i32 [[I0]], ptr [[D0:%.*]], align 4
+; CHECK-NEXT:    store i32 [[I1]], ptr [[D1:%.*]], align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:
@@ -127,11 +127,11 @@ b1:
 end:
   %i0 = phi i32 [ %v0, %b0 ], [ %v1, %b1 ]
   %i1 = phi i32 [ %v2, %b1 ], [ %v0, %b0 ] ; from %b0 takes %v2 instead of %v1
-  store i32 %i0, i32* %d0
-  store i32 %i1, i32* %d1
+  store i32 %i0, ptr %d0
+  store i32 %i1, ptr %d1
   ret void
 }
-define void @negative_test4(i32 %v0, i32 %v1, i1 %c, i32* %d0, i32* %d1) {
+define void @negative_test4(i32 %v0, i32 %v1, i1 %c, ptr %d0, ptr %d1) {
 ; CHECK-LABEL: @negative_test4(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    br i1 [[C:%.*]], label [[B0:%.*]], label [[B1:%.*]]
@@ -142,8 +142,8 @@ define void @negative_test4(i32 %v0, i32 %v1, i1 %c, i32* %d0, i32* %d1) {
 ; CHECK:       end:
 ; CHECK-NEXT:    [[I0:%.*]] = phi i32 [ [[V0:%.*]], [[B0]] ], [ [[V1:%.*]], [[B1]] ]
 ; CHECK-NEXT:    [[I1:%.*]] = phi i32 [ [[V1]], [[B1]] ], [ [[V0]], [[B0]] ]
-; CHECK-NEXT:    store i32 [[I0]], i32* [[D0:%.*]], align 4
-; CHECK-NEXT:    store i32 [[I1]], i32* [[D1:%.*]], align 4
+; CHECK-NEXT:    store i32 [[I0]], ptr [[D0:%.*]], align 4
+; CHECK-NEXT:    store i32 [[I1]], ptr [[D1:%.*]], align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:
@@ -158,13 +158,13 @@ b1:
 end:
   %i0 = phi i32 [ %v0, %b0 ], [ %v1, %b1 ]
   %i1 = phi i32 [ %v1, %b1 ], [ %v0, %b0 ] ; incoming values are swapped
-  store i32 %i0, i32* %d0
-  store i32 %i1, i32* %d1
+  store i32 %i0, ptr %d0
+  store i32 %i1, ptr %d1
   ret void
 }
 
 ; Both PHIs are identical, but the first one has no uses, so ignore it.
-define void @test5(i32 %v0, i32 %v1, i1 %c, i32* %d0, i32* %d1) {
+define void @test5(i32 %v0, i32 %v1, i1 %c, ptr %d0, ptr %d1) {
 ; CHECK-LABEL: @test5(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    br i1 [[C:%.*]], label [[B0:%.*]], label [[B1:%.*]]
@@ -174,7 +174,7 @@ define void @test5(i32 %v0, i32 %v1, i1 %c, i32* %d0, i32* %d1) {
 ; CHECK-NEXT:    br label [[END]]
 ; CHECK:       end:
 ; CHECK-NEXT:    [[I1:%.*]] = phi i32 [ [[V0:%.*]], [[B0]] ], [ [[V1:%.*]], [[B1]] ]
-; CHECK-NEXT:    store i32 [[I1]], i32* [[D1:%.*]], align 4
+; CHECK-NEXT:    store i32 [[I1]], ptr [[D1:%.*]], align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:
@@ -189,11 +189,11 @@ b1:
 end:
   %i0 = phi i32 [ %v0, %b0 ], [ %v1, %b1 ] ; unused
   %i1 = phi i32 [ %v0, %b0 ], [ %v1, %b1 ]
-  store i32 %i1, i32* %d1
+  store i32 %i1, ptr %d1
   ret void
 }
 ; Second PHI has no uses
-define void @test6(i32 %v0, i32 %v1, i1 %c, i32* %d0, i32* %d1) {
+define void @test6(i32 %v0, i32 %v1, i1 %c, ptr %d0, ptr %d1) {
 ; CHECK-LABEL: @test6(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    br i1 [[C:%.*]], label [[B0:%.*]], label [[B1:%.*]]
@@ -203,7 +203,7 @@ define void @test6(i32 %v0, i32 %v1, i1 %c, i32* %d0, i32* %d1) {
 ; CHECK-NEXT:    br label [[END]]
 ; CHECK:       end:
 ; CHECK-NEXT:    [[I0:%.*]] = phi i32 [ [[V0:%.*]], [[B0]] ], [ [[V1:%.*]], [[B1]] ]
-; CHECK-NEXT:    store i32 [[I0]], i32* [[D0:%.*]], align 4
+; CHECK-NEXT:    store i32 [[I0]], ptr [[D0:%.*]], align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:
@@ -218,12 +218,12 @@ b1:
 end:
   %i0 = phi i32 [ %v0, %b0 ], [ %v1, %b1 ]
   %i1 = phi i32 [ %v0, %b0 ], [ %v1, %b1 ] ; unused
-  store i32 %i0, i32* %d0
+  store i32 %i0, ptr %d0
   ret void
 }
 
 ; Non-matching PHI node should be ignored without terminating CSE.
-define void @test7(i32 %v0, i32 %v1, i16 %v2, i16 %v3, i1 %c, i32* %d0, i32* %d1, i16* %d2) {
+define void @test7(i32 %v0, i32 %v1, i16 %v2, i16 %v3, i1 %c, ptr %d0, ptr %d1, ptr %d2) {
 ; CHECK-LABEL: @test7(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    br i1 [[C:%.*]], label [[B0:%.*]], label [[B1:%.*]]
@@ -235,9 +235,9 @@ define void @test7(i32 %v0, i32 %v1, i16 %v2, i16 %v3, i1 %c, i32* %d0, i32* %d1
 ; CHECK-NEXT:    [[IBAD:%.*]] = phi i16 [ [[V2:%.*]], [[B0]] ], [ [[V3:%.*]], [[B1]] ]
 ; CHECK-NEXT:    [[I0:%.*]] = phi i32 [ [[V0:%.*]], [[B0]] ], [ [[V1:%.*]], [[B1]] ]
 ; CHECK-NEXT:    [[I1:%.*]] = phi i32 [ [[V0]], [[B0]] ], [ [[V1]], [[B1]] ]
-; CHECK-NEXT:    store i32 [[I0]], i32* [[D0:%.*]], align 4
-; CHECK-NEXT:    store i32 [[I1]], i32* [[D1:%.*]], align 4
-; CHECK-NEXT:    store i16 [[IBAD]], i16* [[D2:%.*]], align 2
+; CHECK-NEXT:    store i32 [[I0]], ptr [[D0:%.*]], align 4
+; CHECK-NEXT:    store i32 [[I1]], ptr [[D1:%.*]], align 4
+; CHECK-NEXT:    store i16 [[IBAD]], ptr [[D2:%.*]], align 2
 ; CHECK-NEXT:    ret void
 ;
 entry:
@@ -253,12 +253,12 @@ end:
   %iBAD = phi i16 [ %v2, %b0 ], [ %v3, %b1 ]
   %i0 = phi i32 [ %v0, %b0 ], [ %v1, %b1 ]
   %i1 = phi i32 [ %v0, %b0 ], [ %v1, %b1 ]
-  store i32 %i0, i32* %d0
-  store i32 %i1, i32* %d1
-  store i16 %iBAD, i16* %d2
+  store i32 %i0, ptr %d0
+  store i32 %i1, ptr %d1
+  store i16 %iBAD, ptr %d2
   ret void
 }
-define void @test8(i32 %v0, i32 %v1, i16 %v2, i16 %v3, i1 %c, i32* %d0, i32* %d1, i16* %d2) {
+define void @test8(i32 %v0, i32 %v1, i16 %v2, i16 %v3, i1 %c, ptr %d0, ptr %d1, ptr %d2) {
 ; CHECK-LABEL: @test8(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    br i1 [[C:%.*]], label [[B0:%.*]], label [[B1:%.*]]
@@ -270,9 +270,9 @@ define void @test8(i32 %v0, i32 %v1, i16 %v2, i16 %v3, i1 %c, i32* %d0, i32* %d1
 ; CHECK-NEXT:    [[I0:%.*]] = phi i32 [ [[V0:%.*]], [[B0]] ], [ [[V1:%.*]], [[B1]] ]
 ; CHECK-NEXT:    [[IBAD:%.*]] = phi i16 [ [[V2:%.*]], [[B0]] ], [ [[V3:%.*]], [[B1]] ]
 ; CHECK-NEXT:    [[I1:%.*]] = phi i32 [ [[V0]], [[B0]] ], [ [[V1]], [[B1]] ]
-; CHECK-NEXT:    store i32 [[I0]], i32* [[D0:%.*]], align 4
-; CHECK-NEXT:    store i32 [[I1]], i32* [[D1:%.*]], align 4
-; CHECK-NEXT:    store i16 [[IBAD]], i16* [[D2:%.*]], align 2
+; CHECK-NEXT:    store i32 [[I0]], ptr [[D0:%.*]], align 4
+; CHECK-NEXT:    store i32 [[I1]], ptr [[D1:%.*]], align 4
+; CHECK-NEXT:    store i16 [[IBAD]], ptr [[D2:%.*]], align 2
 ; CHECK-NEXT:    ret void
 ;
 entry:
@@ -288,12 +288,12 @@ end:
   %i0 = phi i32 [ %v0, %b0 ], [ %v1, %b1 ]
   %iBAD = phi i16 [ %v2, %b0 ], [ %v3, %b1 ]
   %i1 = phi i32 [ %v0, %b0 ], [ %v1, %b1 ]
-  store i32 %i0, i32* %d0
-  store i32 %i1, i32* %d1
-  store i16 %iBAD, i16* %d2
+  store i32 %i0, ptr %d0
+  store i32 %i1, ptr %d1
+  store i16 %iBAD, ptr %d2
   ret void
 }
-define void @test9(i32 %v0, i32 %v1, i16 %v2, i16 %v3, i1 %c, i32* %d0, i32* %d1, i16* %d2) {
+define void @test9(i32 %v0, i32 %v1, i16 %v2, i16 %v3, i1 %c, ptr %d0, ptr %d1, ptr %d2) {
 ; CHECK-LABEL: @test9(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    br i1 [[C:%.*]], label [[B0:%.*]], label [[B1:%.*]]
@@ -305,9 +305,9 @@ define void @test9(i32 %v0, i32 %v1, i16 %v2, i16 %v3, i1 %c, i32* %d0, i32* %d1
 ; CHECK-NEXT:    [[I0:%.*]] = phi i32 [ [[V0:%.*]], [[B0]] ], [ [[V1:%.*]], [[B1]] ]
 ; CHECK-NEXT:    [[I1:%.*]] = phi i32 [ [[V0]], [[B0]] ], [ [[V1]], [[B1]] ]
 ; CHECK-NEXT:    [[IBAD:%.*]] = phi i16 [ [[V2:%.*]], [[B0]] ], [ [[V3:%.*]], [[B1]] ]
-; CHECK-NEXT:    store i32 [[I0]], i32* [[D0:%.*]], align 4
-; CHECK-NEXT:    store i32 [[I1]], i32* [[D1:%.*]], align 4
-; CHECK-NEXT:    store i16 [[IBAD]], i16* [[D2:%.*]], align 2
+; CHECK-NEXT:    store i32 [[I0]], ptr [[D0:%.*]], align 4
+; CHECK-NEXT:    store i32 [[I1]], ptr [[D1:%.*]], align 4
+; CHECK-NEXT:    store i16 [[IBAD]], ptr [[D2:%.*]], align 2
 ; CHECK-NEXT:    ret void
 ;
 entry:
@@ -323,8 +323,8 @@ end:
   %i0 = phi i32 [ %v0, %b0 ], [ %v1, %b1 ]
   %i1 = phi i32 [ %v0, %b0 ], [ %v1, %b1 ]
   %iBAD = phi i16 [ %v2, %b0 ], [ %v3, %b1 ]
-  store i32 %i0, i32* %d0
-  store i32 %i1, i32* %d1
-  store i16 %iBAD, i16* %d2
+  store i32 %i0, ptr %d0
+  store i32 %i1, ptr %d1
+  store i16 %iBAD, ptr %d2
   ret void
 }

diff  --git a/llvm/test/Transforms/EarlyCSE/pr33406.ll b/llvm/test/Transforms/EarlyCSE/pr33406.ll
index e0d2cccb48ac1..0974f013050b4 100644
--- a/llvm/test/Transforms/EarlyCSE/pr33406.ll
+++ b/llvm/test/Transforms/EarlyCSE/pr33406.ll
@@ -8,7 +8,7 @@ define void @patatino() {
 ; CHECK-NEXT:  for.cond:
 ; CHECK-NEXT:    br i1 true, label [[IF_END:%.*]], label [[FOR_INC:%.*]]
 ; CHECK:       if.end:
-; CHECK-NEXT:    [[TINKYWINKY:%.*]] = load i32, i32* @b, align 4
+; CHECK-NEXT:    [[TINKYWINKY:%.*]] = load i32, ptr @b, align 4
 ; CHECK-NEXT:    br i1 true, label [[FOR_INC]], label [[FOR_INC]]
 ; CHECK:       for.inc:
 ; CHECK-NEXT:    ret void
@@ -17,8 +17,8 @@ for.cond:
   br i1 true, label %if.end, label %for.inc
 
 if.end:
-  %tinkywinky = load i32, i32* @b
-  store i32 %tinkywinky, i32* @b
+  %tinkywinky = load i32, ptr @b
+  store i32 %tinkywinky, ptr @b
   br i1 true, label %for.inc, label %for.inc
 
 for.inc:

diff  --git a/llvm/test/Transforms/EarlyCSE/readnone-mayunwind.ll b/llvm/test/Transforms/EarlyCSE/readnone-mayunwind.ll
index baa050a433d80..9b2ae3b7eba4c 100644
--- a/llvm/test/Transforms/EarlyCSE/readnone-mayunwind.ll
+++ b/llvm/test/Transforms/EarlyCSE/readnone-mayunwind.ll
@@ -3,16 +3,16 @@
 
 declare void @readnone_may_unwind() readnone
 
-define void @f(i32* %ptr) {
+define void @f(ptr %ptr) {
 ; CHECK-LABEL: @f(
-; CHECK-NEXT:    store i32 100, i32* [[PTR:%.*]], align 4
+; CHECK-NEXT:    store i32 100, ptr [[PTR:%.*]], align 4
 ; CHECK-NEXT:    call void @readnone_may_unwind()
-; CHECK-NEXT:    store i32 200, i32* [[PTR]], align 4
+; CHECK-NEXT:    store i32 200, ptr [[PTR]], align 4
 ; CHECK-NEXT:    ret void
 ;
 
-  store i32 100, i32* %ptr
+  store i32 100, ptr %ptr
   call void @readnone_may_unwind()
-  store i32 200, i32* %ptr
+  store i32 200, ptr %ptr
   ret void
 }

diff  --git a/llvm/test/Transforms/EarlyCSE/writeonly.ll b/llvm/test/Transforms/EarlyCSE/writeonly.ll
index 3c95efb012a86..af20ecfb90519 100644
--- a/llvm/test/Transforms/EarlyCSE/writeonly.ll
+++ b/llvm/test/Transforms/EarlyCSE/writeonly.ll
@@ -7,11 +7,11 @@ declare void @foo() nounwind
 define void @test() {
 ; CHECK-LABEL: @test(
 ; CHECK-NEXT:    call void @foo() #[[ATTR1:[0-9]+]]
-; CHECK-NEXT:    store i32 2, i32* @var, align 4
+; CHECK-NEXT:    store i32 2, ptr @var, align 4
 ; CHECK-NEXT:    ret void
 ;
-  store i32 1, i32* @var
+  store i32 1, ptr @var
   call void @foo() writeonly
-  store i32 2, i32* @var
+  store i32 2, ptr @var
   ret void
 }


        

