[llvm] e45cf47 - [Bitcode] Remove auto-detection for typed pointers

Nikita Popov via llvm-commits llvm-commits at lists.llvm.org
Wed Dec 14 04:39:32 PST 2022


Author: Nikita Popov
Date: 2022-12-14T13:38:20+01:00
New Revision: e45cf479231fc144b4e1eb8b3e3bd2f578b6907d

URL: https://github.com/llvm/llvm-project/commit/e45cf479231fc144b4e1eb8b3e3bd2f578b6907d
DIFF: https://github.com/llvm/llvm-project/commit/e45cf479231fc144b4e1eb8b3e3bd2f578b6907d.diff

LOG: [Bitcode] Remove auto-detection for typed pointers

Always read bitcode according to the -opaque-pointers mode. Do not
perform auto-detection to implicitly switch to typed pointers.

This is a step towards removing typed pointer support, and also
eliminates the class of problems where linking may fail if a typed
pointer module is loaded before an opaque pointer module. (The
latest place where this was encountered is D139924, but this has
previously been fixed in other places doing bitcode linking as well.)

Differential Revision: https://reviews.llvm.org/D139940

Added: 
    

Modified: 
    llvm/docs/OpaquePointers.rst
    llvm/lib/Bitcode/Reader/BitcodeReader.cpp
    llvm/test/Bitcode/alloca-addrspace.ll
    llvm/test/Bitcode/arm-intrinsics.ll
    llvm/test/Bitcode/atomic-no-syncscope.ll
    llvm/test/Bitcode/atomic.ll
    llvm/test/Bitcode/atomicrmw-upgrade.ll
    llvm/test/Bitcode/attributes-3.3.ll
    llvm/test/Bitcode/byval-upgrade.test
    llvm/test/Bitcode/callbr.ll
    llvm/test/Bitcode/cmpxchg-upgrade.ll
    llvm/test/Bitcode/cmpxchg.3.6.ll
    llvm/test/Bitcode/compatibility-3.6.ll
    llvm/test/Bitcode/compatibility-3.7.ll
    llvm/test/Bitcode/compatibility-3.8.ll
    llvm/test/Bitcode/compatibility-3.9.ll
    llvm/test/Bitcode/compatibility-4.0.ll
    llvm/test/Bitcode/compatibility-5.0.ll
    llvm/test/Bitcode/compatibility-6.0.ll
    llvm/test/Bitcode/constantsTest.3.2.ll
    llvm/test/Bitcode/conversionInstructions.3.2.ll
    llvm/test/Bitcode/dityperefs-3.8.ll
    llvm/test/Bitcode/dso_location.ll
    llvm/test/Bitcode/function-address-space-fwd-decl.ll
    llvm/test/Bitcode/getelementptr-zero-indices.ll
    llvm/test/Bitcode/global-variables.3.2.ll
    llvm/test/Bitcode/highLevelStructure.3.2.ll
    llvm/test/Bitcode/inalloca-upgrade.test
    llvm/test/Bitcode/inalloca.ll
    llvm/test/Bitcode/intrinsics-struct-upgrade.ll
    llvm/test/Bitcode/intrinsics-with-unnamed-types.ll
    llvm/test/Bitcode/invalid.test
    llvm/test/Bitcode/local-linkage-default-visibility.3.4.ll
    llvm/test/Bitcode/memInstructions.3.2.ll
    llvm/test/Bitcode/metadata-2.ll
    llvm/test/Bitcode/metadata.3.5.ll
    llvm/test/Bitcode/miscInstructions.3.2.ll
    llvm/test/Bitcode/nocfivalue.ll
    llvm/test/Bitcode/objectsize-upgrade-7.0.ll
    llvm/test/Bitcode/old-aliases.ll
    llvm/test/Bitcode/select.ll
    llvm/test/Bitcode/standardCIntrinsic.3.2.ll
    llvm/test/Bitcode/terminatorInstructions.3.2.ll
    llvm/test/Bitcode/thinlto-function-summary.ll
    llvm/test/Bitcode/thinlto-summary-local-5.0.ll
    llvm/test/Bitcode/upgrade-aarch64-ldstxr.ll
    llvm/test/Bitcode/upgrade-aarch64-sve-intrinsics.ll
    llvm/test/Bitcode/upgrade-annotation.ll
    llvm/test/Bitcode/upgrade-arc-attachedcall-bundle.ll
    llvm/test/Bitcode/upgrade-arc-runtime-calls-bitcast.ll
    llvm/test/Bitcode/upgrade-arc-runtime-calls.ll
    llvm/test/Bitcode/upgrade-elementtype.ll
    llvm/test/Bitcode/upgrade-global-ctors.ll
    llvm/test/Bitcode/upgrade-global-dtors.ll
    llvm/test/Bitcode/upgrade-inline-asm-elementtype.ll
    llvm/test/Bitcode/upgrade-ptr-annotation.ll
    llvm/test/Bitcode/upgrade-tbaa.ll
    llvm/test/Bitcode/upgrade-var-annotation.ll
    llvm/test/Bitcode/variableArgumentIntrinsic.3.2.ll
    llvm/test/Bitcode/weak-cmpxchg-upgrade.ll

Removed: 
    llvm/test/Bitcode/Inputs/invalid-call-mismatched-explicit-type.bc
    llvm/test/Bitcode/Inputs/invalid-gep-mismatched-explicit-type.bc
    llvm/test/Bitcode/Inputs/invalid-gep-operator-mismatched-explicit-type.bc
    llvm/test/Bitcode/Inputs/invalid-load-mismatched-explicit-type.bc


################################################################################
diff --git a/llvm/docs/OpaquePointers.rst b/llvm/docs/OpaquePointers.rst
index 9205e05575116..4ae3aa1043b38 100644
--- a/llvm/docs/OpaquePointers.rst
+++ b/llvm/docs/OpaquePointers.rst
@@ -287,10 +287,12 @@ The following typed pointer functionality has already been removed:
 * The ``CLANG_ENABLE_OPAQUE_POINTERS`` cmake flag is no longer supported.
 * C APIs that do not support opaque pointers (like ``LLVMBuildLoad``) are no
   longer supported.
+* Typed pointer bitcode is implicitly upgraded to use opaque pointers, unless
+  ``-opaque-pointers=0`` is passed.
 
 The following typed pointer functionality is still to be removed:
 
 * The ``-no-opaque-pointers`` cc1 flag, ``-opaque-pointers=0`` opt flag and
   ``-plugin-opt=no-opaque-pointers`` lto flag.
-* Auto-detection of typed pointers in bitcode and textual IR.
+* Auto-detection of typed pointers in textual IR.
 * Support for typed pointers in LLVM libraries.

diff --git a/llvm/lib/Bitcode/Reader/BitcodeReader.cpp b/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
index 47b297261e009..0aeb1bcf8a788 100644
--- a/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
+++ b/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
@@ -2359,8 +2359,6 @@ Error BitcodeReader::parseTypeTableBody() {
       if (!ResultTy ||
           !PointerType::isValidElementType(ResultTy))
         return error("Invalid type");
-      if (LLVM_UNLIKELY(!Context.hasSetOpaquePointersValue()))
-        Context.setOpaquePointers(false);
       ContainedIDs.push_back(Record[0]);
       ResultTy = PointerType::get(ResultTy, AddressSpace);
       break;
@@ -2368,9 +2366,7 @@ Error BitcodeReader::parseTypeTableBody() {
     case bitc::TYPE_CODE_OPAQUE_POINTER: { // OPAQUE_POINTER: [addrspace]
       if (Record.size() != 1)
         return error("Invalid opaque pointer record");
-      if (LLVM_UNLIKELY(!Context.hasSetOpaquePointersValue())) {
-        Context.setOpaquePointers(true);
-      } else if (Context.supportsTypedPointers())
+      if (Context.supportsTypedPointers())
         return error(
             "Opaque pointers are only supported in -opaque-pointers mode");
       unsigned AddressSpace = Record[0];

diff --git a/llvm/test/Bitcode/Inputs/invalid-call-mismatched-explicit-type.bc b/llvm/test/Bitcode/Inputs/invalid-call-mismatched-explicit-type.bc
deleted file mode 100644
index 802d28edbf445..0000000000000
Binary files a/llvm/test/Bitcode/Inputs/invalid-call-mismatched-explicit-type.bc and /dev/null differ

diff --git a/llvm/test/Bitcode/Inputs/invalid-gep-mismatched-explicit-type.bc b/llvm/test/Bitcode/Inputs/invalid-gep-mismatched-explicit-type.bc
deleted file mode 100644
index 0d828e8c3296d..0000000000000
Binary files a/llvm/test/Bitcode/Inputs/invalid-gep-mismatched-explicit-type.bc and /dev/null differ

diff --git a/llvm/test/Bitcode/Inputs/invalid-gep-operator-mismatched-explicit-type.bc b/llvm/test/Bitcode/Inputs/invalid-gep-operator-mismatched-explicit-type.bc
deleted file mode 100644
index 3af687f056f98..0000000000000
Binary files a/llvm/test/Bitcode/Inputs/invalid-gep-operator-mismatched-explicit-type.bc and /dev/null differ

diff --git a/llvm/test/Bitcode/Inputs/invalid-load-mismatched-explicit-type.bc b/llvm/test/Bitcode/Inputs/invalid-load-mismatched-explicit-type.bc
deleted file mode 100644
index 1b8cbc7513165..0000000000000
Binary files a/llvm/test/Bitcode/Inputs/invalid-load-mismatched-explicit-type.bc and /dev/null differ

diff --git a/llvm/test/Bitcode/alloca-addrspace.ll b/llvm/test/Bitcode/alloca-addrspace.ll
index 48af7ab5015df..7ae7cc05d147d 100644
--- a/llvm/test/Bitcode/alloca-addrspace.ll
+++ b/llvm/test/Bitcode/alloca-addrspace.ll
@@ -2,14 +2,14 @@
 
 target datalayout = "A2"
 
-; CHECK-LABEL: define i8 addrspace(2)* @alloca_addrspace_2() {
+; CHECK-LABEL: define ptr addrspace(2) @alloca_addrspace_2() {
 ; CHECK: %alloca = alloca i8, align 1, addrspace(2)
 define i8 addrspace(2)* @alloca_addrspace_2() {
   %alloca = alloca i8, addrspace(2)
   ret i8 addrspace(2)* %alloca
 }
 
-; CHECK-LABEL: define i8 addrspace(5)* @alloca_addrspace_5() {
+; CHECK-LABEL: define ptr addrspace(5) @alloca_addrspace_5() {
 ; CHECK: %alloca = alloca i8, align 1, addrspace(5)
 define i8 addrspace(5)* @alloca_addrspace_5() {
   %alloca = alloca i8, addrspace(5)

diff --git a/llvm/test/Bitcode/arm-intrinsics.ll b/llvm/test/Bitcode/arm-intrinsics.ll
index 42066c6029821..be52c59b5518b 100644
--- a/llvm/test/Bitcode/arm-intrinsics.ll
+++ b/llvm/test/Bitcode/arm-intrinsics.ll
@@ -1,14 +1,14 @@
 ; RUN: llvm-dis < %S/arm-intrinsics.bc | FileCheck %s
 
 define void @f(i32* %p) {
-; CHECK: call i32 @llvm.arm.ldrex.p0i32(i32* elementtype(i32)
+; CHECK: call i32 @llvm.arm.ldrex.p0(ptr elementtype(i32)
   %a = call i32 @llvm.arm.ldrex.p0i32(i32* %p)
-; CHECK: call i32 @llvm.arm.strex.p0i32(i32 0, i32* elementtype(i32)
+; CHECK: call i32 @llvm.arm.strex.p0(i32 0, ptr elementtype(i32)
   %c = call i32 @llvm.arm.strex.p0i32(i32 0, i32* %p)
 
-; CHECK: call i32 @llvm.arm.ldaex.p0i32(i32* elementtype(i32)
+; CHECK: call i32 @llvm.arm.ldaex.p0(ptr elementtype(i32)
   %a2 = call i32 @llvm.arm.ldaex.p0i32(i32* %p)
-; CHECK: call i32 @llvm.arm.stlex.p0i32(i32 0, i32* elementtype(i32)
+; CHECK: call i32 @llvm.arm.stlex.p0(i32 0, ptr elementtype(i32)
   %c2 = call i32 @llvm.arm.stlex.p0i32(i32 0, i32* %p)
   ret void
 }

diff --git a/llvm/test/Bitcode/atomic-no-syncscope.ll b/llvm/test/Bitcode/atomic-no-syncscope.ll
index a57507bc81468..8dd0a9eaffe65 100644
--- a/llvm/test/Bitcode/atomic-no-syncscope.ll
+++ b/llvm/test/Bitcode/atomic-no-syncscope.ll
@@ -3,15 +3,15 @@
 ; Backwards compatibility test: make sure we can process bitcode without
 ; synchronization scope names encoded in it.
 
-; CHECK: load atomic i32, i32* %x unordered, align 4
-; CHECK: load atomic volatile i32, i32* %x syncscope("singlethread") acquire, align 4
-; CHECK: store atomic i32 3, i32* %x release, align 4
-; CHECK: store atomic volatile i32 3, i32* %x syncscope("singlethread") monotonic, align 4
-; CHECK: cmpxchg i32* %x, i32 1, i32 0 syncscope("singlethread") monotonic monotonic
-; CHECK: cmpxchg volatile i32* %x, i32 0, i32 1 acq_rel acquire
-; CHECK: cmpxchg i32* %x, i32 42, i32 0 acq_rel monotonic
-; CHECK: cmpxchg weak i32* %x, i32 13, i32 0 seq_cst monotonic
-; CHECK: atomicrmw add i32* %x, i32 10 seq_cst
-; CHECK: atomicrmw volatile xchg  i32* %x, i32 10 monotonic
+; CHECK: load atomic i32, ptr %x unordered, align 4
+; CHECK: load atomic volatile i32, ptr %x syncscope("singlethread") acquire, align 4
+; CHECK: store atomic i32 3, ptr %x release, align 4
+; CHECK: store atomic volatile i32 3, ptr %x syncscope("singlethread") monotonic, align 4
+; CHECK: cmpxchg ptr %x, i32 1, i32 0 syncscope("singlethread") monotonic monotonic
+; CHECK: cmpxchg volatile ptr %x, i32 0, i32 1 acq_rel acquire
+; CHECK: cmpxchg ptr %x, i32 42, i32 0 acq_rel monotonic
+; CHECK: cmpxchg weak ptr %x, i32 13, i32 0 seq_cst monotonic
+; CHECK: atomicrmw add ptr %x, i32 10 seq_cst
+; CHECK: atomicrmw volatile xchg  ptr %x, i32 10 monotonic
 ; CHECK: fence syncscope("singlethread") release
 ; CHECK: fence seq_cst

diff --git a/llvm/test/Bitcode/atomic.ll b/llvm/test/Bitcode/atomic.ll
index bef3f2712935a..3c9ddbd6446ee 100644
--- a/llvm/test/Bitcode/atomic.ll
+++ b/llvm/test/Bitcode/atomic.ll
@@ -3,16 +3,16 @@
 
 define void @test_cmpxchg(i32* %addr, i32 %desired, i32 %new) {
   cmpxchg i32* %addr, i32 %desired, i32 %new seq_cst seq_cst
-  ; CHECK: cmpxchg i32* %addr, i32 %desired, i32 %new seq_cst seq_cst
+  ; CHECK: cmpxchg ptr %addr, i32 %desired, i32 %new seq_cst seq_cst
 
   cmpxchg volatile i32* %addr, i32 %desired, i32 %new seq_cst monotonic
-  ; CHECK: cmpxchg volatile i32* %addr, i32 %desired, i32 %new seq_cst monotonic
+  ; CHECK: cmpxchg volatile ptr %addr, i32 %desired, i32 %new seq_cst monotonic
 
   cmpxchg weak i32* %addr, i32 %desired, i32 %new acq_rel acquire
-  ; CHECK: cmpxchg weak i32* %addr, i32 %desired, i32 %new acq_rel acquire
+  ; CHECK: cmpxchg weak ptr %addr, i32 %desired, i32 %new acq_rel acquire
 
   cmpxchg weak volatile i32* %addr, i32 %desired, i32 %new syncscope("singlethread") release monotonic
-  ; CHECK: cmpxchg weak volatile i32* %addr, i32 %desired, i32 %new syncscope("singlethread") release monotonic
+  ; CHECK: cmpxchg weak volatile ptr %addr, i32 %desired, i32 %new syncscope("singlethread") release monotonic
 
   ret void
 }

diff --git a/llvm/test/Bitcode/atomicrmw-upgrade.ll b/llvm/test/Bitcode/atomicrmw-upgrade.ll
index 62c6aa7b16cb5..4cbc96c18fc4b 100644
--- a/llvm/test/Bitcode/atomicrmw-upgrade.ll
+++ b/llvm/test/Bitcode/atomicrmw-upgrade.ll
@@ -5,7 +5,7 @@
 ; before the IR change on this file.
 
 ; CHECK: @atomicrmw
-; CHECK:   %b = atomicrmw add i32* %a, i32 %i acquire
+; CHECK:   %b = atomicrmw add ptr %a, i32 %i acquire
 define void @atomicrmw(i32* %a, i32 %i) {
     %b = atomicrmw add i32* %a, i32 %i acquire
     ret void

diff --git a/llvm/test/Bitcode/attributes-3.3.ll b/llvm/test/Bitcode/attributes-3.3.ll
index f9aef5d2f612d..b471c52d2609c 100644
--- a/llvm/test/Bitcode/attributes-3.3.ll
+++ b/llvm/test/Bitcode/attributes-3.3.ll
@@ -30,7 +30,7 @@ define void @f4(i8 inreg %0)
 }
 
 define void @f5(i8* sret(i8) %0)
-; CHECK: define void @f5(i8* sret(i8) %0)
+; CHECK: define void @f5(ptr sret(i8) %0)
 {
         ret void;
 }
@@ -42,19 +42,19 @@ define void @f6() nounwind
 }
 
 define void @f7(i8* noalias %0)
-; CHECK: define void @f7(i8* noalias %0)
+; CHECK: define void @f7(ptr noalias %0)
 {
         ret void;
 }
 
 define void @f8(i8* byval(i8) %0)
-; CHECK: define void @f8(i8* byval(i8) %0)
+; CHECK: define void @f8(ptr byval(i8) %0)
 {
         ret void;
 }
 
 define void @f9(i8* nest %0)
-; CHECK: define void @f9(i8* nest %0)
+; CHECK: define void @f9(ptr nest %0)
 {
         ret void;
 }
@@ -108,7 +108,7 @@ define void @f17(i8 align 4 %0)
 }
 
 define void @f18(i8* nocapture %0)
-; CHECK: define void @f18(i8* nocapture %0)
+; CHECK: define void @f18(ptr nocapture %0)
 {
         ret void;
 }

diff --git a/llvm/test/Bitcode/byval-upgrade.test b/llvm/test/Bitcode/byval-upgrade.test
index 1012bf728830f..f44e5d348c2bb 100644
--- a/llvm/test/Bitcode/byval-upgrade.test
+++ b/llvm/test/Bitcode/byval-upgrade.test
@@ -3,5 +3,5 @@ RUN: llvm-dis %p/Inputs/byval-upgrade.bc -o - | FileCheck %s
 Make sure we upgrade old-stile IntAttribute byval records to a fully typed
 version correctly.
 
-CHECK: call void @bar({ i32*, i8 }* byval({ i32*, i8 }) %ptr)
-CHECK: invoke void @bar({ i32*, i8 }* byval({ i32*, i8 }) %ptr)
+CHECK: call void @bar(ptr byval({ ptr, i8 }) %ptr)
+CHECK: invoke void @bar(ptr byval({ ptr, i8 }) %ptr)

diff --git a/llvm/test/Bitcode/callbr.ll b/llvm/test/Bitcode/callbr.ll
index de9c8650d7df0..5f6283dd035d6 100644
--- a/llvm/test/Bitcode/callbr.ll
+++ b/llvm/test/Bitcode/callbr.ll
@@ -1,8 +1,7 @@
-; RUN:  llvm-dis < %s.bc | FileCheck %s --check-prefixes=CHECK,CHECK-TYPED
+; RUN:  llvm-dis < %s.bc | FileCheck %s
 ; callbr.ll.bc was generated by passing this file to llvm-as.
 
-; RUN: llvm-as < %s | llvm-dis | FileCheck %s --check-prefixes=CHECK,CHECK-TYPED
-; RUN: llvm-as -opaque-pointers < %s | llvm-dis -opaque-pointers | FileCheck %s --check-prefixes=CHECK,CHECK-OPAQUE
+; RUN: llvm-as < %s | llvm-dis | FileCheck %s
 
 define i32 @test_asm_goto(i32 %x){
 entry:
@@ -30,8 +29,7 @@ fail2:
 
 define i32 @test_asm_goto3(i32 %x){
 entry:
-; CHECK-TYPED:      callbr void asm "", "r,i,!i"(i32 %x, i8* blockaddress(@test_asm_goto3, %unrelated))
-; CHECK-OPAQUE:     callbr void asm "", "r,i,!i"(i32 %x, ptr blockaddress(@test_asm_goto3, %unrelated))
+; CHECK:     callbr void asm "", "r,i,!i"(i32 %x, ptr blockaddress(@test_asm_goto3, %unrelated))
 ; CHECK-NEXT: to label %normal [label %fail]
   callbr void asm "", "r,i,!i"(i32 %x, i8* blockaddress(@test_asm_goto3, %unrelated)) to label %normal [label %fail]
 normal:

diff --git a/llvm/test/Bitcode/cmpxchg-upgrade.ll b/llvm/test/Bitcode/cmpxchg-upgrade.ll
index 125729e99cd90..c00b3c014660b 100644
--- a/llvm/test/Bitcode/cmpxchg-upgrade.ll
+++ b/llvm/test/Bitcode/cmpxchg-upgrade.ll
@@ -6,19 +6,19 @@
 
 define void @test(i32* %addr) {
    cmpxchg i32* %addr, i32 42, i32 0 monotonic
-; CHECK: cmpxchg i32* %addr, i32 42, i32 0 monotonic monotonic
+; CHECK: cmpxchg ptr %addr, i32 42, i32 0 monotonic monotonic
 
    cmpxchg i32* %addr, i32 42, i32 0 acquire
-; CHECK: cmpxchg i32* %addr, i32 42, i32 0 acquire acquire
+; CHECK: cmpxchg ptr %addr, i32 42, i32 0 acquire acquire
 
    cmpxchg i32* %addr, i32 42, i32 0 release
-; CHECK: cmpxchg i32* %addr, i32 42, i32 0 release monotonic
+; CHECK: cmpxchg ptr %addr, i32 42, i32 0 release monotonic
 
    cmpxchg i32* %addr, i32 42, i32 0 acq_rel
-; CHECK: cmpxchg i32* %addr, i32 42, i32 0 acq_rel acquire
+; CHECK: cmpxchg ptr %addr, i32 42, i32 0 acq_rel acquire
 
    cmpxchg i32* %addr, i32 42, i32 0 seq_cst
-; CHECK: cmpxchg i32* %addr, i32 42, i32 0 seq_cst seq_cst
+; CHECK: cmpxchg ptr %addr, i32 42, i32 0 seq_cst seq_cst
 
    ret void
 }

diff --git a/llvm/test/Bitcode/cmpxchg.3.6.ll b/llvm/test/Bitcode/cmpxchg.3.6.ll
index bec51a128c336..f3cc77f4b1b5c 100644
--- a/llvm/test/Bitcode/cmpxchg.3.6.ll
+++ b/llvm/test/Bitcode/cmpxchg.3.6.ll
@@ -5,7 +5,7 @@ entry:
   br label %a
 b:
   cmpxchg i32* %x, i32 %y, i32 %z acquire acquire
-; CHECK: cmpxchg i32* %x, i32 %y, i32 %z acquire acquire
+; CHECK: cmpxchg ptr %x, i32 %y, i32 %z acquire acquire
   ret void
 a:
   %y = add i32 %y.orig, 1

diff --git a/llvm/test/Bitcode/compatibility-3.6.ll b/llvm/test/Bitcode/compatibility-3.6.ll
index 5748c236afcb3..7fc28c5175c1b 100644
--- a/llvm/test/Bitcode/compatibility-3.6.ll
+++ b/llvm/test/Bitcode/compatibility-3.6.ll
@@ -58,7 +58,7 @@ $comdat2 = comdat any
 @const.float = constant double 0.0
 ; CHECK: @const.float = constant double 0.0
 @const.null = constant i8* null
-; CHECK: @const.null = constant i8* null
+; CHECK: @const.null = constant ptr null
 %const.struct.type = type { i32, i8 }
 %const.struct.type.packed = type <{ i32, i8 }>
 @const.struct = constant %const.struct.type { i32 -1, i8 undef }
@@ -161,13 +161,13 @@ $comdat2 = comdat any
 @g.used3 = global i8 0
 declare void @g.f1()
 @llvm.used = appending global [1 x i32*] [i32* @g.used1], section "llvm.metadata"
-; CHECK: @llvm.used = appending global [1 x i32*] [i32* @g.used1], section "llvm.metadata"
+; CHECK: @llvm.used = appending global [1 x ptr] [ptr @g.used1], section "llvm.metadata"
 @llvm.compiler.used = appending global [1 x i32*] [i32* @g.used2], section "llvm.metadata"
-; CHECK: @llvm.compiler.used = appending global [1 x i32*] [i32* @g.used2], section "llvm.metadata"
+; CHECK: @llvm.compiler.used = appending global [1 x ptr] [ptr @g.used2], section "llvm.metadata"
 @llvm.global_ctors = appending global [1 x %pri.func.data] [%pri.func.data { i32 0, void ()* @g.f1, i8* @g.used3 }], section "llvm.metadata"
-; CHECK: @llvm.global_ctors = appending global [1 x %pri.func.data] [%pri.func.data { i32 0, void ()* @g.f1, i8* @g.used3 }], section "llvm.metadata"
+; CHECK: @llvm.global_ctors = appending global [1 x %pri.func.data] [%pri.func.data { i32 0, ptr @g.f1, ptr @g.used3 }], section "llvm.metadata"
 @llvm.global_dtors = appending global [1 x %pri.func.data] [%pri.func.data { i32 0, void ()* @g.f1, i8* @g.used3 }], section "llvm.metadata"
-; CHECK: @llvm.global_dtors = appending global [1 x %pri.func.data] [%pri.func.data { i32 0, void ()* @g.f1, i8* @g.used3 }], section "llvm.metadata"
+; CHECK: @llvm.global_dtors = appending global [1 x %pri.func.data] [%pri.func.data { i32 0, ptr @g.f1, ptr @g.used3 }], section "llvm.metadata"
 
 ;; Aliases
 ; Format: @<Name> = [Linkage] [Visibility] [DLLStorageClass] [ThreadLocal]
@@ -175,51 +175,51 @@ declare void @g.f1()
 
 ; Aliases -- Linkage
 @a.private = private alias i32* @g.private
-; CHECK: @a.private = private alias i32, i32* @g.private
+; CHECK: @a.private = private alias i32, ptr @g.private
 @a.internal = internal alias i32* @g.internal
-; CHECK: @a.internal = internal alias i32, i32* @g.internal
+; CHECK: @a.internal = internal alias i32, ptr @g.internal
 @a.linkonce = linkonce alias i32* @g.linkonce
-; CHECK: @a.linkonce = linkonce alias i32, i32* @g.linkonce
+; CHECK: @a.linkonce = linkonce alias i32, ptr @g.linkonce
 @a.weak = weak alias i32* @g.weak
-; CHECK: @a.weak = weak alias i32, i32* @g.weak
+; CHECK: @a.weak = weak alias i32, ptr @g.weak
 @a.linkonce_odr = linkonce_odr alias i32* @g.linkonce_odr
-; CHECK: @a.linkonce_odr = linkonce_odr alias i32, i32* @g.linkonce_odr
+; CHECK: @a.linkonce_odr = linkonce_odr alias i32, ptr @g.linkonce_odr
 @a.weak_odr = weak_odr alias i32* @g.weak_odr
-; CHECK: @a.weak_odr = weak_odr alias i32, i32* @g.weak_odr
+; CHECK: @a.weak_odr = weak_odr alias i32, ptr @g.weak_odr
 @a.external = external alias i32* @g1
-; CHECK: @a.external = alias i32, i32* @g1
+; CHECK: @a.external = alias i32, ptr @g1
 
 ; Aliases -- Visibility
 @a.default = default alias i32* @g.default
-; CHECK: @a.default = alias i32, i32* @g.default
+; CHECK: @a.default = alias i32, ptr @g.default
 @a.hidden = hidden alias i32* @g.hidden
-; CHECK: @a.hidden = hidden alias i32, i32* @g.hidden
+; CHECK: @a.hidden = hidden alias i32, ptr @g.hidden
 @a.protected = protected alias i32* @g.protected
-; CHECK: @a.protected = protected alias i32, i32* @g.protected
+; CHECK: @a.protected = protected alias i32, ptr @g.protected
 
 ; Aliases -- DLLStorageClass
 @a.dlldefault = default alias i32* @g.dlldefault
-; CHECK: @a.dlldefault = alias i32, i32* @g.dlldefault
+; CHECK: @a.dlldefault = alias i32, ptr @g.dlldefault
 @a.dllimport = dllimport alias i32* @g1
-; CHECK: @a.dllimport = dllimport alias i32, i32* @g1
+; CHECK: @a.dllimport = dllimport alias i32, ptr @g1
 @a.dllexport = dllexport alias i32* @g.dllexport
-; CHECK: @a.dllexport = dllexport alias i32, i32* @g.dllexport
+; CHECK: @a.dllexport = dllexport alias i32, ptr @g.dllexport
 
 ; Aliases -- ThreadLocal
 @a.notthreadlocal = alias i32* @g.notthreadlocal
-; CHECK: @a.notthreadlocal = alias i32, i32* @g.notthreadlocal
+; CHECK: @a.notthreadlocal = alias i32, ptr @g.notthreadlocal
 @a.generaldynamic = thread_local alias i32* @g.generaldynamic
-; CHECK: @a.generaldynamic = thread_local alias i32, i32* @g.generaldynamic
+; CHECK: @a.generaldynamic = thread_local alias i32, ptr @g.generaldynamic
 @a.localdynamic = thread_local(localdynamic) alias i32* @g.localdynamic
-; CHECK: @a.localdynamic = thread_local(localdynamic) alias i32, i32* @g.localdynamic
+; CHECK: @a.localdynamic = thread_local(localdynamic) alias i32, ptr @g.localdynamic
 @a.initialexec = thread_local(initialexec) alias i32* @g.initialexec
-; CHECK: @a.initialexec = thread_local(initialexec) alias i32, i32* @g.initialexec
+; CHECK: @a.initialexec = thread_local(initialexec) alias i32, ptr @g.initialexec
 @a.localexec = thread_local(localexec) alias i32* @g.localexec
-; CHECK: @a.localexec = thread_local(localexec) alias i32, i32* @g.localexec
+; CHECK: @a.localexec = thread_local(localexec) alias i32, ptr @g.localexec
 
 ; Aliases -- unnamed_addr
 @a.unnamed_addr = unnamed_addr alias i32* @g.unnamed_addr
-; CHECK: @a.unnamed_addr = unnamed_addr alias i32, i32* @g.unnamed_addr
+; CHECK: @a.unnamed_addr = unnamed_addr alias i32, ptr @g.unnamed_addr
 
 ;; Functions
 ; Format: define [linkage] [visibility] [DLLStorageClass]
@@ -384,17 +384,17 @@ declare zeroext i64 @f.zeroext()
 declare signext i64 @f.signext()
 ; CHECK: declare signext i64 @f.signext()
 declare inreg i32* @f.inreg()
-; CHECK: declare inreg i32* @f.inreg()
+; CHECK: declare inreg ptr @f.inreg()
 declare noalias i32* @f.noalias()
-; CHECK: declare noalias i32* @f.noalias()
+; CHECK: declare noalias ptr @f.noalias()
 declare nonnull i32* @f.nonnull()
-; CHECK: declare nonnull i32* @f.nonnull()
+; CHECK: declare nonnull ptr @f.nonnull()
 declare dereferenceable(4) i32* @f.dereferenceable4()
-; CHECK: declare dereferenceable(4) i32* @f.dereferenceable4()
+; CHECK: declare dereferenceable(4) ptr @f.dereferenceable4()
 declare dereferenceable(8) i32* @f.dereferenceable8()
-; CHECK: declare dereferenceable(8) i32* @f.dereferenceable8()
+; CHECK: declare dereferenceable(8) ptr @f.dereferenceable8()
 declare dereferenceable(16) i32* @f.dereferenceable16()
-; CHECK: declare dereferenceable(16) i32* @f.dereferenceable16()
+; CHECK: declare dereferenceable(16) ptr @f.dereferenceable16()
 
 ; Functions -- Parameter attributes
 declare void @f.param.zeroext(i8 zeroext)
@@ -404,23 +404,23 @@ declare void @f.param.signext(i8 signext)
 declare void @f.param.inreg(i8 inreg)
 ; CHECK: declare void @f.param.inreg(i8 inreg)
 declare void @f.param.byval({ i8, i8 }* byval({ i8, i8 }))
-; CHECK: declare void @f.param.byval({ i8, i8 }* byval({ i8, i8 }))
+; CHECK: declare void @f.param.byval(ptr byval({ i8, i8 }))
 declare void @f.param.inalloca(i8* inalloca)
-; CHECK: declare void @f.param.inalloca(i8* inalloca(i8))
+; CHECK: declare void @f.param.inalloca(ptr inalloca(i8))
 declare void @f.param.sret(i8* sret(i8))
-; CHECK: declare void @f.param.sret(i8* sret(i8))
+; CHECK: declare void @f.param.sret(ptr sret(i8))
 declare void @f.param.noalias(i8* noalias)
-; CHECK: declare void @f.param.noalias(i8* noalias)
+; CHECK: declare void @f.param.noalias(ptr noalias)
 declare void @f.param.nocapture(i8* nocapture)
-; CHECK: declare void @f.param.nocapture(i8* nocapture)
+; CHECK: declare void @f.param.nocapture(ptr nocapture)
 declare void @f.param.nest(i8* nest)
-; CHECK: declare void @f.param.nest(i8* nest)
+; CHECK: declare void @f.param.nest(ptr nest)
 declare i8* @f.param.returned(i8* returned)
-; CHECK: declare i8* @f.param.returned(i8* returned)
+; CHECK: declare ptr @f.param.returned(ptr returned)
 declare void @f.param.nonnull(i8* nonnull)
-; CHECK: declare void @f.param.nonnull(i8* nonnull)
+; CHECK: declare void @f.param.nonnull(ptr nonnull)
 declare void @f.param.dereferenceable(i8* dereferenceable(4))
-; CHECK: declare void @f.param.dereferenceable(i8* dereferenceable(4))
+; CHECK: declare void @f.param.dereferenceable(ptr dereferenceable(4))
 
 ; Functions -- unnamed_addr
 declare void @f.unnamed_addr() unnamed_addr
@@ -538,43 +538,43 @@ declare void @f.prefixarray() prefix [4 x i32] [i32 0, i32 1, i32 2, i32 3]
 ;; Atomic Memory Ordering Constraints
 define void @atomics(i32* %word) {
   %cmpxchg.0 = cmpxchg i32* %word, i32 0, i32 4 monotonic monotonic
-  ; CHECK: %cmpxchg.0 = cmpxchg i32* %word, i32 0, i32 4 monotonic monotonic
+  ; CHECK: %cmpxchg.0 = cmpxchg ptr %word, i32 0, i32 4 monotonic monotonic
   %cmpxchg.1 = cmpxchg i32* %word, i32 0, i32 5 acq_rel monotonic
-  ; CHECK: %cmpxchg.1 = cmpxchg i32* %word, i32 0, i32 5 acq_rel monotonic
+  ; CHECK: %cmpxchg.1 = cmpxchg ptr %word, i32 0, i32 5 acq_rel monotonic
   %cmpxchg.2 = cmpxchg i32* %word, i32 0, i32 6 acquire monotonic
-  ; CHECK: %cmpxchg.2 = cmpxchg i32* %word, i32 0, i32 6 acquire monotonic
+  ; CHECK: %cmpxchg.2 = cmpxchg ptr %word, i32 0, i32 6 acquire monotonic
   %cmpxchg.3 = cmpxchg i32* %word, i32 0, i32 7 release monotonic
-  ; CHECK: %cmpxchg.3 = cmpxchg i32* %word, i32 0, i32 7 release monotonic
+  ; CHECK: %cmpxchg.3 = cmpxchg ptr %word, i32 0, i32 7 release monotonic
   %cmpxchg.4 = cmpxchg i32* %word, i32 0, i32 8 seq_cst monotonic
-  ; CHECK: %cmpxchg.4 = cmpxchg i32* %word, i32 0, i32 8 seq_cst monotonic
+  ; CHECK: %cmpxchg.4 = cmpxchg ptr %word, i32 0, i32 8 seq_cst monotonic
   %cmpxchg.5 = cmpxchg weak i32* %word, i32 0, i32 9 seq_cst monotonic
-  ; CHECK: %cmpxchg.5 = cmpxchg weak i32* %word, i32 0, i32 9 seq_cst monotonic
+  ; CHECK: %cmpxchg.5 = cmpxchg weak ptr %word, i32 0, i32 9 seq_cst monotonic
   %cmpxchg.6 = cmpxchg volatile i32* %word, i32 0, i32 10 seq_cst monotonic
-  ; CHECK: %cmpxchg.6 = cmpxchg volatile i32* %word, i32 0, i32 10 seq_cst monotonic
+  ; CHECK: %cmpxchg.6 = cmpxchg volatile ptr %word, i32 0, i32 10 seq_cst monotonic
   %cmpxchg.7 = cmpxchg weak volatile i32* %word, i32 0, i32 11 syncscope("singlethread") seq_cst monotonic
-  ; CHECK: %cmpxchg.7 = cmpxchg weak volatile i32* %word, i32 0, i32 11 syncscope("singlethread") seq_cst monotonic
+  ; CHECK: %cmpxchg.7 = cmpxchg weak volatile ptr %word, i32 0, i32 11 syncscope("singlethread") seq_cst monotonic
   %atomicrmw.xchg = atomicrmw xchg i32* %word, i32 12 monotonic
-  ; CHECK: %atomicrmw.xchg = atomicrmw xchg i32* %word, i32 12 monotonic
+  ; CHECK: %atomicrmw.xchg = atomicrmw xchg ptr %word, i32 12 monotonic
   %atomicrmw.add = atomicrmw add i32* %word, i32 13 monotonic
-  ; CHECK: %atomicrmw.add = atomicrmw add i32* %word, i32 13 monotonic
+  ; CHECK: %atomicrmw.add = atomicrmw add ptr %word, i32 13 monotonic
   %atomicrmw.sub = atomicrmw sub i32* %word, i32 14 monotonic
-  ; CHECK: %atomicrmw.sub = atomicrmw sub i32* %word, i32 14 monotonic
+  ; CHECK: %atomicrmw.sub = atomicrmw sub ptr %word, i32 14 monotonic
   %atomicrmw.and = atomicrmw and i32* %word, i32 15 monotonic
-  ; CHECK: %atomicrmw.and = atomicrmw and i32* %word, i32 15 monotonic
+  ; CHECK: %atomicrmw.and = atomicrmw and ptr %word, i32 15 monotonic
   %atomicrmw.nand = atomicrmw nand i32* %word, i32 16 monotonic
-  ; CHECK: %atomicrmw.nand = atomicrmw nand i32* %word, i32 16 monotonic
+  ; CHECK: %atomicrmw.nand = atomicrmw nand ptr %word, i32 16 monotonic
   %atomicrmw.or = atomicrmw or i32* %word, i32 17 monotonic
-  ; CHECK: %atomicrmw.or = atomicrmw or i32* %word, i32 17 monotonic
+  ; CHECK: %atomicrmw.or = atomicrmw or ptr %word, i32 17 monotonic
   %atomicrmw.xor = atomicrmw xor i32* %word, i32 18 monotonic
-  ; CHECK: %atomicrmw.xor = atomicrmw xor i32* %word, i32 18 monotonic
+  ; CHECK: %atomicrmw.xor = atomicrmw xor ptr %word, i32 18 monotonic
   %atomicrmw.max = atomicrmw max i32* %word, i32 19 monotonic
-  ; CHECK: %atomicrmw.max = atomicrmw max i32* %word, i32 19 monotonic
+  ; CHECK: %atomicrmw.max = atomicrmw max ptr %word, i32 19 monotonic
   %atomicrmw.min = atomicrmw volatile min i32* %word, i32 20 monotonic
-  ; CHECK: %atomicrmw.min = atomicrmw volatile min i32* %word, i32 20 monotonic
+  ; CHECK: %atomicrmw.min = atomicrmw volatile min ptr %word, i32 20 monotonic
   %atomicrmw.umax = atomicrmw umax i32* %word, i32 21 syncscope("singlethread") monotonic
-  ; CHECK: %atomicrmw.umax = atomicrmw umax i32* %word, i32 21 syncscope("singlethread") monotonic
+  ; CHECK: %atomicrmw.umax = atomicrmw umax ptr %word, i32 21 syncscope("singlethread") monotonic
   %atomicrmw.umin = atomicrmw volatile umin i32* %word, i32 22 syncscope("singlethread") monotonic
-  ; CHECK: %atomicrmw.umin = atomicrmw volatile umin i32* %word, i32 22 syncscope("singlethread") monotonic
+  ; CHECK: %atomicrmw.umin = atomicrmw volatile umin ptr %word, i32 22 syncscope("singlethread") monotonic
   fence acquire
   ; CHECK: fence acquire
   fence release
@@ -586,18 +586,18 @@ define void @atomics(i32* %word) {
 
   ; XXX: The parser spits out the load type here.
   %ld.1 = load atomic i32* %word monotonic, align 4
-  ; CHECK: %ld.1 = load atomic i32, i32* %word monotonic, align 4
+  ; CHECK: %ld.1 = load atomic i32, ptr %word monotonic, align 4
   %ld.2 = load atomic volatile i32* %word acquire, align 8
-  ; CHECK: %ld.2 = load atomic volatile i32, i32* %word acquire, align 8
+  ; CHECK: %ld.2 = load atomic volatile i32, ptr %word acquire, align 8
   %ld.3 = load atomic volatile i32* %word syncscope("singlethread") seq_cst, align 16
-  ; CHECK: %ld.3 = load atomic volatile i32, i32* %word syncscope("singlethread") seq_cst, align 16
+  ; CHECK: %ld.3 = load atomic volatile i32, ptr %word syncscope("singlethread") seq_cst, align 16
 
   store atomic i32 23, i32* %word monotonic, align 4
-  ; CHECK: store atomic i32 23, i32* %word monotonic, align 4
+  ; CHECK: store atomic i32 23, ptr %word monotonic, align 4
   store atomic volatile i32 24, i32* %word monotonic, align 4
-  ; CHECK: store atomic volatile i32 24, i32* %word monotonic, align 4
+  ; CHECK: store atomic volatile i32 24, ptr %word monotonic, align 4
   store atomic volatile i32 25, i32* %word syncscope("singlethread") monotonic, align 4
-  ; CHECK: store atomic volatile i32 25, i32* %word syncscope("singlethread") monotonic, align 4
+  ; CHECK: store atomic volatile i32 25, ptr %word syncscope("singlethread") monotonic, align 4
   ret void
 }
 
@@ -620,17 +620,17 @@ define void @fastmathflags(float %op1, float %op2) {
 %opaquety = type opaque
 define void @typesystem() {
   %p0 = bitcast i8* null to i32 (i32)*
-  ; CHECK: %p0 = bitcast i8* null to i32 (i32)*
+  ; CHECK: %p0 = bitcast ptr null to ptr
   %p1 = bitcast i8* null to void (i8*)*
-  ; CHECK: %p1 = bitcast i8* null to void (i8*)*
+  ; CHECK: %p1 = bitcast ptr null to ptr
   %p2 = bitcast i8* null to i32 (i8*, ...)*
-  ; CHECK: %p2 = bitcast i8* null to i32 (i8*, ...)*
+  ; CHECK: %p2 = bitcast ptr null to ptr
   %p3 = bitcast i8* null to { i32, i8 } (i8*, ...)*
-  ; CHECK: %p3 = bitcast i8* null to { i32, i8 } (i8*, ...)*
+  ; CHECK: %p3 = bitcast ptr null to ptr
   %p4 = bitcast i8* null to <{ i32, i8 }> (i8*, ...)*
-  ; CHECK: %p4 = bitcast i8* null to <{ i32, i8 }> (i8*, ...)*
+  ; CHECK: %p4 = bitcast ptr null to ptr
   %p5 = bitcast i8* null to <{ i32, i8 }> (<{ i8*, i64 }>*, ...)*
-  ; CHECK: %p5 = bitcast i8* null to <{ i32, i8 }> (<{ i8*, i64 }>*, ...)*
+  ; CHECK: %p5 = bitcast ptr null to ptr
 
   %t0 = alloca i1942652
   ; CHECK: %t0 = alloca i1942652
@@ -649,7 +649,7 @@ define void @typesystem() {
   %t7 = alloca x86_mmx
   ; CHECK: %t7 = alloca x86_mmx
   %t8 = alloca %opaquety*
-  ; CHECK: %t8 = alloca %opaquety*
+  ; CHECK: %t8 = alloca ptr
 
   ret void
 }
@@ -667,7 +667,7 @@ define void @inlineasm(i32 %arg) {
 
 ; Instructions -- Terminators
 define void @instructions.terminators(i8 %val) { ; XXX: landingpad changed.
-; CHECK: define void @instructions.terminators(i8 %val) personality i32 ()* @personality_handler
+; CHECK: define void @instructions.terminators(i8 %val) personality ptr @personality_handler
 
   br i1 false, label %iftrue, label %iffalse
   ; CHECK: br i1 false, label %iftrue, label %iffalse
@@ -697,9 +697,9 @@ defaultdest.1:
 defaultdest.2:
 
   indirectbr i8* blockaddress(@instructions.terminators, %defaultdest.2), [label %defaultdest.2]
-  ; CHECK: indirectbr i8* blockaddress(@instructions.terminators, %defaultdest.2), [label %defaultdest.2]
+  ; CHECK: indirectbr ptr blockaddress(@instructions.terminators, %defaultdest.2), [label %defaultdest.2]
   indirectbr i8* blockaddress(@instructions.terminators, %defaultdest.2), [label %defaultdest.2, label %defaultdest.2]
-  ; CHECK: indirectbr i8* blockaddress(@instructions.terminators, %defaultdest.2), [label %defaultdest.2, label %defaultdest.2]
+  ; CHECK: indirectbr ptr blockaddress(@instructions.terminators, %defaultdest.2), [label %defaultdest.2, label %defaultdest.2]
 
   invoke fastcc void @f.fastcc()
   ; CHECK: invoke fastcc void @f.fastcc()
@@ -837,17 +837,17 @@ define void @instructions.aggregateops({ i8, i32 } %up, <{ i8, i32 }> %p,
 
   ; XXX: The parser spits out the load type here.
   getelementptr { i8, i32 }* %up.ptr, i8 0
-  ; CHECK: getelementptr { i8, i32 }, { i8, i32 }* %up.ptr, i8 0
+  ; CHECK: getelementptr { i8, i32 }, ptr %up.ptr, i8 0
   getelementptr <{ i8, i32 }>* %p.ptr, i8 1
-  ; CHECK: getelementptr <{ i8, i32 }>, <{ i8, i32 }>* %p.ptr, i8 1
+  ; CHECK: getelementptr <{ i8, i32 }>, ptr %p.ptr, i8 1
   getelementptr [3 x i8]* %arr.ptr, i8 2
-  ; CHECK: getelementptr [3 x i8], [3 x i8]* %arr.ptr, i8 2
+  ; CHECK: getelementptr [3 x i8], ptr %arr.ptr, i8 2
   getelementptr { i8, { i32 } }* %n.ptr, i32 0, i32 1
-  ; CHECK: getelementptr { i8, { i32 } }, { i8, { i32 } }* %n.ptr, i32 0, i32 1
+  ; CHECK: getelementptr { i8, { i32 } }, ptr %n.ptr, i32 0, i32 1
   getelementptr inbounds { i8, { i32 } }* %n.ptr, i32 1, i32 0
-  ; CHECK: getelementptr inbounds { i8, { i32 } }, { i8, { i32 } }* %n.ptr, i32 1, i32 0
+  ; CHECK: getelementptr inbounds { i8, { i32 } }, ptr %n.ptr, i32 1, i32 0
   getelementptr <2 x i8*> %pvec, <2 x i64> %offsets
-  ; CHECK: getelementptr i8, <2 x i8*> %pvec, <2 x i64> %offsets
+  ; CHECK: getelementptr i8, <2 x ptr> %pvec, <2 x i64> %offsets
 
   ret void
 }
@@ -863,14 +863,14 @@ define void @instructions.memops(i32** %base) {
   ; CHECK: alloca inalloca i32, i8 4, align 4
 
   load i32** %base, align 8, !invariant.load !7, !nontemporal !8, !nonnull !7, !dereferenceable !9
-  ; CHECK: load i32*, i32** %base, align 8, !invariant.load !7, !nontemporal !8, !nonnull !7, !dereferenceable !9
+  ; CHECK: load ptr, ptr %base, align 8, !invariant.load !7, !nontemporal !8, !nonnull !7, !dereferenceable !9
   load volatile i32** %base, align 8, !invariant.load !7, !nontemporal !8, !nonnull !7, !dereferenceable !9
-  ; CHECK: load volatile i32*, i32** %base, align 8, !invariant.load !7, !nontemporal !8, !nonnull !7, !dereferenceable !9
+  ; CHECK: load volatile ptr, ptr %base, align 8, !invariant.load !7, !nontemporal !8, !nonnull !7, !dereferenceable !9
 
   store i32* null, i32** %base, align 4, !nontemporal !8
-  ; CHECK: store i32* null, i32** %base, align 4, !nontemporal !8
+  ; CHECK: store ptr null, ptr %base, align 4, !nontemporal !8
   store volatile i32* null, i32** %base, align 4, !nontemporal !8
-  ; CHECK: store volatile i32* null, i32** %base, align 4, !nontemporal !8
+  ; CHECK: store volatile ptr null, ptr %base, align 4, !nontemporal !8
 
   ret void
 }
@@ -896,13 +896,13 @@ define void @instructions.conversions() {
   sitofp i32 -1 to float
   ; CHECK: sitofp i32 -1 to float
   ptrtoint i8* null to i64
-  ; CHECK: ptrtoint i8* null to i64
+  ; CHECK: ptrtoint ptr null to i64
   inttoptr i64 0 to i8*
-  ; CHECK: inttoptr i64 0 to i8*
+  ; CHECK: inttoptr i64 0 to ptr
   bitcast i32 0 to i32
   ; CHECK: bitcast i32 0 to i32
   addrspacecast i32* null to i32 addrspace(1)*
-  ; CHECK: addrspacecast i32* null to i32 addrspace(1)*
+  ; CHECK: addrspacecast ptr null to ptr addrspace(1)
 
   ret void
 }
@@ -984,16 +984,16 @@ exit:
   ; CHECK: call void @f.nobuiltin() #36
 
   call fastcc noalias i32* @f.noalias() noinline
-  ; CHECK: call fastcc noalias i32* @f.noalias() #11
+  ; CHECK: call fastcc noalias ptr @f.noalias() #11
   tail call ghccc nonnull i32* @f.nonnull() minsize
-  ; CHECK: tail call ghccc nonnull i32* @f.nonnull() #6
+  ; CHECK: tail call ghccc nonnull ptr @f.nonnull() #6
 
   ret void
 }
 
 define void @instructions.call_musttail(i8* inalloca %val) {
   musttail call void @f.param.inalloca(i8* inalloca %val)
-  ; CHECK: musttail call void @f.param.inalloca(i8* inalloca(i8) %val)
+  ; CHECK: musttail call void @f.param.inalloca(ptr inalloca(i8) %val)
 
   ret void
 }
@@ -1003,7 +1003,7 @@ declare void @llvm.donothing() nounwind readnone
 declare i32 @personality_handler()
 
 define void @instructions.landingpad() {
-; CHECK: define void @instructions.landingpad() personality i32 ()* @personality_handler
+; CHECK: define void @instructions.landingpad() personality ptr @personality_handler
 
   invoke void @llvm.donothing() to label %proceed unwind label %catch1
   invoke void @llvm.donothing() to label %proceed unwind label %catch2
@@ -1025,7 +1025,7 @@ catch2:
              cleanup
              ; CHECK: cleanup
              catch i32* null
-             ; CHECK: catch i32* null
+             ; CHECK: catch ptr null
   br label %proceed
 
 catch3:
@@ -1035,9 +1035,9 @@ catch3:
              cleanup
              ; CHECK: cleanup
              catch i32* null
-             ; CHECK: catch i32* null
+             ; CHECK: catch ptr null
              catch i32* null
-             ; CHECK: catch i32* null
+             ; CHECK: catch ptr null
   br label %proceed
 
 catch4:
@@ -1063,16 +1063,16 @@ define void @instructions.va_arg(i8* %v, ...) {
   %ap2 = bitcast i8** %ap to i8*
 
   call void @llvm.va_start(i8* %ap2)
-  ; CHECK: call void @llvm.va_start(i8* %ap2)
+  ; CHECK: call void @llvm.va_start(ptr %ap2)
 
   va_arg i8* %ap2, i32
-  ; CHECK: va_arg i8* %ap2, i32
+  ; CHECK: va_arg ptr %ap2, i32
 
   call void @llvm.va_copy(i8* %v, i8* %ap2)
-  ; CHECK: call void @llvm.va_copy(i8* %v, i8* %ap2)
+  ; CHECK: call void @llvm.va_copy(ptr %v, ptr %ap2)
 
   call void @llvm.va_end(i8* %ap2)
-  ; CHECK: call void @llvm.va_end(i8* %ap2)
+  ; CHECK: call void @llvm.va_end(ptr %ap2)
 
   ret void
 }
@@ -1084,14 +1084,14 @@ declare void @llvm.gcwrite(i8*, i8*, i8**)
 define void @intrinsics.gc() gc "shadow-stack" {
   %ptrloc = alloca i8*
   call void @llvm.gcroot(i8** %ptrloc, i8* null)
-  ; CHECK: call void @llvm.gcroot(i8** %ptrloc, i8* null)
+  ; CHECK: call void @llvm.gcroot(ptr %ptrloc, ptr null)
 
   call i8* @llvm.gcread(i8* null, i8** %ptrloc)
-  ; CHECK: call i8* @llvm.gcread(i8* null, i8** %ptrloc)
+  ; CHECK: call ptr @llvm.gcread(ptr null, ptr %ptrloc)
 
   %ref = alloca i8
   call void @llvm.gcwrite(i8* %ref, i8* null, i8** %ptrloc)
-  ; CHECK: call void @llvm.gcwrite(i8* %ref, i8* null, i8** %ptrloc)
+  ; CHECK: call void @llvm.gcwrite(ptr %ref, ptr null, ptr %ptrloc)
 
   ret void
 }
@@ -1114,9 +1114,9 @@ declare void @llvm.instrprof_increment(i8*, i64, i32, i32)
 !10 = !{!"rax"}
 define void @intrinsics.codegen() {
   call i8* @llvm.returnaddress(i32 1)
-  ; CHECK: call i8* @llvm.returnaddress(i32 1)
+  ; CHECK: call ptr @llvm.returnaddress(i32 1)
   call i8* @llvm.frameaddress(i32 1)
-  ; CHECK: call i8* @llvm.frameaddress.p0i8(i32 1)
+  ; CHECK: call ptr @llvm.frameaddress.p0(i32 1)
 
   call i32 @llvm.read_register.i32(metadata !10)
   ; CHECK: call i32 @llvm.read_register.i32(metadata !10)
@@ -1128,12 +1128,12 @@ define void @intrinsics.codegen() {
   ; CHECK: call void @llvm.write_register.i64(metadata !10, i64 0)
 
   %stack = call i8* @llvm.stacksave()
-  ; CHECK: %stack = call i8* @llvm.stacksave()
+  ; CHECK: %stack = call ptr @llvm.stacksave()
   call void @llvm.stackrestore(i8* %stack)
-  ; CHECK: call void @llvm.stackrestore(i8* %stack)
+  ; CHECK: call void @llvm.stackrestore(ptr %stack)
 
   call void @llvm.prefetch(i8* %stack, i32 0, i32 3, i32 0)
-  ; CHECK: call void @llvm.prefetch.p0i8(i8* %stack, i32 0, i32 3, i32 0)
+  ; CHECK: call void @llvm.prefetch.p0(ptr %stack, i32 0, i32 3, i32 0)
 
   call void @llvm.pcmarker(i32 1)
   ; CHECK: call void @llvm.pcmarker(i32 1)
@@ -1142,10 +1142,10 @@ define void @intrinsics.codegen() {
   ; CHECK: call i64 @llvm.readcyclecounter()
 
   call void @llvm.clear_cache(i8* null, i8* null)
-  ; CHECK: call void @llvm.clear_cache(i8* null, i8* null)
+  ; CHECK: call void @llvm.clear_cache(ptr null, ptr null)
 
   call void @llvm.instrprof_increment(i8* null, i64 0, i32 0, i32 0)
-  ; CHECK: call void @llvm.instrprof_increment(i8* null, i64 0, i32 0, i32 0)
+  ; CHECK: call void @llvm.instrprof_increment(ptr null, i64 0, i32 0, i32 0)
 
   ret void
 }

diff  --git a/llvm/test/Bitcode/compatibility-3.7.ll b/llvm/test/Bitcode/compatibility-3.7.ll
index 513d6feb8d688..c9ca841fdf327 100644
--- a/llvm/test/Bitcode/compatibility-3.7.ll
+++ b/llvm/test/Bitcode/compatibility-3.7.ll
@@ -38,7 +38,7 @@ $comdat.samesize = comdat samesize
 @const.float = constant double 0.0
 ; CHECK: @const.float = constant double 0.0
 @const.null = constant i8* null
-; CHECK: @const.null = constant i8* null
+; CHECK: @const.null = constant ptr null
 %const.struct.type = type { i32, i8 }
 %const.struct.type.packed = type <{ i32, i8 }>
 @const.struct = constant %const.struct.type { i32 -1, i8 undef }
@@ -161,13 +161,13 @@ $comdat2 = comdat any
 @g.used3 = global i8 0
 declare void @g.f1()
 @llvm.used = appending global [1 x i32*] [i32* @g.used1], section "llvm.metadata"
-; CHECK: @llvm.used = appending global [1 x i32*] [i32* @g.used1], section "llvm.metadata"
+; CHECK: @llvm.used = appending global [1 x ptr] [ptr @g.used1], section "llvm.metadata"
 @llvm.compiler.used = appending global [1 x i32*] [i32* @g.used2], section "llvm.metadata"
-; CHECK: @llvm.compiler.used = appending global [1 x i32*] [i32* @g.used2], section "llvm.metadata"
+; CHECK: @llvm.compiler.used = appending global [1 x ptr] [ptr @g.used2], section "llvm.metadata"
 @llvm.global_ctors = appending global [1 x %pri.func.data] [%pri.func.data { i32 0, void ()* @g.f1, i8* @g.used3 }], section "llvm.metadata"
-; CHECK: @llvm.global_ctors = appending global [1 x %pri.func.data] [%pri.func.data { i32 0, void ()* @g.f1, i8* @g.used3 }], section "llvm.metadata"
+; CHECK: @llvm.global_ctors = appending global [1 x %pri.func.data] [%pri.func.data { i32 0, ptr @g.f1, ptr @g.used3 }], section "llvm.metadata"
 @llvm.global_dtors = appending global [1 x %pri.func.data] [%pri.func.data { i32 0, void ()* @g.f1, i8* @g.used3 }], section "llvm.metadata"
-; CHECK: @llvm.global_dtors = appending global [1 x %pri.func.data] [%pri.func.data { i32 0, void ()* @g.f1, i8* @g.used3 }], section "llvm.metadata"
+; CHECK: @llvm.global_dtors = appending global [1 x %pri.func.data] [%pri.func.data { i32 0, ptr @g.f1, ptr @g.used3 }], section "llvm.metadata"
 
 ;; Aliases
 ; Format: @<Name> = [Linkage] [Visibility] [DLLStorageClass] [ThreadLocal]
@@ -175,51 +175,51 @@ declare void @g.f1()
 
 ; Aliases -- Linkage
 @a.private = private alias i32* @g.private
-; CHECK: @a.private = private alias i32, i32* @g.private
+; CHECK: @a.private = private alias i32, ptr @g.private
 @a.internal = internal alias i32* @g.internal
-; CHECK: @a.internal = internal alias i32, i32* @g.internal
+; CHECK: @a.internal = internal alias i32, ptr @g.internal
 @a.linkonce = linkonce alias i32* @g.linkonce
-; CHECK: @a.linkonce = linkonce alias i32, i32* @g.linkonce
+; CHECK: @a.linkonce = linkonce alias i32, ptr @g.linkonce
 @a.weak = weak alias i32* @g.weak
-; CHECK: @a.weak = weak alias i32, i32* @g.weak
+; CHECK: @a.weak = weak alias i32, ptr @g.weak
 @a.linkonce_odr = linkonce_odr alias i32* @g.linkonce_odr
-; CHECK: @a.linkonce_odr = linkonce_odr alias i32, i32* @g.linkonce_odr
+; CHECK: @a.linkonce_odr = linkonce_odr alias i32, ptr @g.linkonce_odr
 @a.weak_odr = weak_odr alias i32* @g.weak_odr
-; CHECK: @a.weak_odr = weak_odr alias i32, i32* @g.weak_odr
+; CHECK: @a.weak_odr = weak_odr alias i32, ptr @g.weak_odr
 @a.external = external alias i32* @g1
-; CHECK: @a.external = alias i32, i32* @g1
+; CHECK: @a.external = alias i32, ptr @g1
 
 ; Aliases -- Visibility
 @a.default = default alias i32* @g.default
-; CHECK: @a.default = alias i32, i32* @g.default
+; CHECK: @a.default = alias i32, ptr @g.default
 @a.hidden = hidden alias i32* @g.hidden
-; CHECK: @a.hidden = hidden alias i32, i32* @g.hidden
+; CHECK: @a.hidden = hidden alias i32, ptr @g.hidden
 @a.protected = protected alias i32* @g.protected
-; CHECK: @a.protected = protected alias i32, i32* @g.protected
+; CHECK: @a.protected = protected alias i32, ptr @g.protected
 
 ; Aliases -- DLLStorageClass
 @a.dlldefault = default alias i32* @g.dlldefault
-; CHECK: @a.dlldefault = alias i32, i32* @g.dlldefault
+; CHECK: @a.dlldefault = alias i32, ptr @g.dlldefault
 @a.dllimport = dllimport alias i32* @g1
-; CHECK: @a.dllimport = dllimport alias i32, i32* @g1
+; CHECK: @a.dllimport = dllimport alias i32, ptr @g1
 @a.dllexport = dllexport alias i32* @g.dllexport
-; CHECK: @a.dllexport = dllexport alias i32, i32* @g.dllexport
+; CHECK: @a.dllexport = dllexport alias i32, ptr @g.dllexport
 
 ; Aliases -- ThreadLocal
 @a.notthreadlocal = alias i32* @g.notthreadlocal
-; CHECK: @a.notthreadlocal = alias i32, i32* @g.notthreadlocal
+; CHECK: @a.notthreadlocal = alias i32, ptr @g.notthreadlocal
 @a.generaldynamic = thread_local alias i32* @g.generaldynamic
-; CHECK: @a.generaldynamic = thread_local alias i32, i32* @g.generaldynamic
+; CHECK: @a.generaldynamic = thread_local alias i32, ptr @g.generaldynamic
 @a.localdynamic = thread_local(localdynamic) alias i32* @g.localdynamic
-; CHECK: @a.localdynamic = thread_local(localdynamic) alias i32, i32* @g.localdynamic
+; CHECK: @a.localdynamic = thread_local(localdynamic) alias i32, ptr @g.localdynamic
 @a.initialexec = thread_local(initialexec) alias i32* @g.initialexec
-; CHECK: @a.initialexec = thread_local(initialexec) alias i32, i32* @g.initialexec
+; CHECK: @a.initialexec = thread_local(initialexec) alias i32, ptr @g.initialexec
 @a.localexec = thread_local(localexec) alias i32* @g.localexec
-; CHECK: @a.localexec = thread_local(localexec) alias i32, i32* @g.localexec
+; CHECK: @a.localexec = thread_local(localexec) alias i32, ptr @g.localexec
 
 ; Aliases -- unnamed_addr
 @a.unnamed_addr = unnamed_addr alias i32* @g.unnamed_addr
-; CHECK: @a.unnamed_addr = unnamed_addr alias i32, i32* @g.unnamed_addr
+; CHECK: @a.unnamed_addr = unnamed_addr alias i32, ptr @g.unnamed_addr
 
 ;; Functions
 ; Format: define [linkage] [visibility] [DLLStorageClass]
@@ -384,23 +384,23 @@ declare zeroext i64 @f.zeroext()
 declare signext i64 @f.signext()
 ; CHECK: declare signext i64 @f.signext()
 declare inreg i32* @f.inreg()
-; CHECK: declare inreg i32* @f.inreg()
+; CHECK: declare inreg ptr @f.inreg()
 declare noalias i32* @f.noalias()
-; CHECK: declare noalias i32* @f.noalias()
+; CHECK: declare noalias ptr @f.noalias()
 declare nonnull i32* @f.nonnull()
-; CHECK: declare nonnull i32* @f.nonnull()
+; CHECK: declare nonnull ptr @f.nonnull()
 declare dereferenceable(4) i32* @f.dereferenceable4()
-; CHECK: declare dereferenceable(4) i32* @f.dereferenceable4()
+; CHECK: declare dereferenceable(4) ptr @f.dereferenceable4()
 declare dereferenceable(8) i32* @f.dereferenceable8()
-; CHECK: declare dereferenceable(8) i32* @f.dereferenceable8()
+; CHECK: declare dereferenceable(8) ptr @f.dereferenceable8()
 declare dereferenceable(16) i32* @f.dereferenceable16()
-; CHECK: declare dereferenceable(16) i32* @f.dereferenceable16()
+; CHECK: declare dereferenceable(16) ptr @f.dereferenceable16()
 declare dereferenceable_or_null(4) i32* @f.dereferenceable4_or_null()
-; CHECK: declare dereferenceable_or_null(4) i32* @f.dereferenceable4_or_null()
+; CHECK: declare dereferenceable_or_null(4) ptr @f.dereferenceable4_or_null()
 declare dereferenceable_or_null(8) i32* @f.dereferenceable8_or_null()
-; CHECK: declare dereferenceable_or_null(8) i32* @f.dereferenceable8_or_null()
+; CHECK: declare dereferenceable_or_null(8) ptr @f.dereferenceable8_or_null()
 declare dereferenceable_or_null(16) i32* @f.dereferenceable16_or_null()
-; CHECK: declare dereferenceable_or_null(16) i32* @f.dereferenceable16_or_null()
+; CHECK: declare dereferenceable_or_null(16) ptr @f.dereferenceable16_or_null()
 
 ; Functions -- Parameter attributes
 declare void @f.param.zeroext(i8 zeroext)
@@ -410,25 +410,25 @@ declare void @f.param.signext(i8 signext)
 declare void @f.param.inreg(i8 inreg)
 ; CHECK: declare void @f.param.inreg(i8 inreg)
 declare void @f.param.byval({ i8, i8 }* byval({ i8, i8 }))
-; CHECK: declare void @f.param.byval({ i8, i8 }* byval({ i8, i8 }))
+; CHECK: declare void @f.param.byval(ptr byval({ i8, i8 }))
 declare void @f.param.inalloca(i8* inalloca)
-; CHECK: declare void @f.param.inalloca(i8* inalloca(i8))
+; CHECK: declare void @f.param.inalloca(ptr inalloca(i8))
 declare void @f.param.sret(i8* sret(i8))
-; CHECK: declare void @f.param.sret(i8* sret(i8))
+; CHECK: declare void @f.param.sret(ptr sret(i8))
 declare void @f.param.noalias(i8* noalias)
-; CHECK: declare void @f.param.noalias(i8* noalias)
+; CHECK: declare void @f.param.noalias(ptr noalias)
 declare void @f.param.nocapture(i8* nocapture)
-; CHECK: declare void @f.param.nocapture(i8* nocapture)
+; CHECK: declare void @f.param.nocapture(ptr nocapture)
 declare void @f.param.nest(i8* nest)
-; CHECK: declare void @f.param.nest(i8* nest)
+; CHECK: declare void @f.param.nest(ptr nest)
 declare i8* @f.param.returned(i8* returned)
-; CHECK: declare i8* @f.param.returned(i8* returned)
+; CHECK: declare ptr @f.param.returned(ptr returned)
 declare void @f.param.nonnull(i8* nonnull)
-; CHECK: declare void @f.param.nonnull(i8* nonnull)
+; CHECK: declare void @f.param.nonnull(ptr nonnull)
 declare void @f.param.dereferenceable(i8* dereferenceable(4))
-; CHECK: declare void @f.param.dereferenceable(i8* dereferenceable(4))
+; CHECK: declare void @f.param.dereferenceable(ptr dereferenceable(4))
 declare void @f.param.dereferenceable_or_null(i8* dereferenceable_or_null(4))
-; CHECK: declare void @f.param.dereferenceable_or_null(i8* dereferenceable_or_null(4))
+; CHECK: declare void @f.param.dereferenceable_or_null(ptr dereferenceable_or_null(4))
 
 ; Functions -- unnamed_addr
 declare void @f.unnamed_addr() unnamed_addr
@@ -571,7 +571,7 @@ normal:
 declare i32 @f.personality_handler()
 ; CHECK: declare i32 @f.personality_handler()
 define void @f.personality() personality i32 ()* @f.personality_handler {
-; CHECK: define void @f.personality() personality i32 ()* @f.personality_handler
+; CHECK: define void @f.personality() personality ptr @f.personality_handler
   invoke void @llvm.donothing() to label %normal unwind label %exception
 exception:
   %cleanup = landingpad i32 cleanup
@@ -583,43 +583,43 @@ normal:
 ;; Atomic Memory Ordering Constraints
 define void @atomics(i32* %word) {
   %cmpxchg.0 = cmpxchg i32* %word, i32 0, i32 4 monotonic monotonic
-  ; CHECK: %cmpxchg.0 = cmpxchg i32* %word, i32 0, i32 4 monotonic monotonic
+  ; CHECK: %cmpxchg.0 = cmpxchg ptr %word, i32 0, i32 4 monotonic monotonic
   %cmpxchg.1 = cmpxchg i32* %word, i32 0, i32 5 acq_rel monotonic
-  ; CHECK: %cmpxchg.1 = cmpxchg i32* %word, i32 0, i32 5 acq_rel monotonic
+  ; CHECK: %cmpxchg.1 = cmpxchg ptr %word, i32 0, i32 5 acq_rel monotonic
   %cmpxchg.2 = cmpxchg i32* %word, i32 0, i32 6 acquire monotonic
-  ; CHECK: %cmpxchg.2 = cmpxchg i32* %word, i32 0, i32 6 acquire monotonic
+  ; CHECK: %cmpxchg.2 = cmpxchg ptr %word, i32 0, i32 6 acquire monotonic
   %cmpxchg.3 = cmpxchg i32* %word, i32 0, i32 7 release monotonic
-  ; CHECK: %cmpxchg.3 = cmpxchg i32* %word, i32 0, i32 7 release monotonic
+  ; CHECK: %cmpxchg.3 = cmpxchg ptr %word, i32 0, i32 7 release monotonic
   %cmpxchg.4 = cmpxchg i32* %word, i32 0, i32 8 seq_cst monotonic
-  ; CHECK: %cmpxchg.4 = cmpxchg i32* %word, i32 0, i32 8 seq_cst monotonic
+  ; CHECK: %cmpxchg.4 = cmpxchg ptr %word, i32 0, i32 8 seq_cst monotonic
   %cmpxchg.5 = cmpxchg weak i32* %word, i32 0, i32 9 seq_cst monotonic
-  ; CHECK: %cmpxchg.5 = cmpxchg weak i32* %word, i32 0, i32 9 seq_cst monotonic
+  ; CHECK: %cmpxchg.5 = cmpxchg weak ptr %word, i32 0, i32 9 seq_cst monotonic
   %cmpxchg.6 = cmpxchg volatile i32* %word, i32 0, i32 10 seq_cst monotonic
-  ; CHECK: %cmpxchg.6 = cmpxchg volatile i32* %word, i32 0, i32 10 seq_cst monotonic
+  ; CHECK: %cmpxchg.6 = cmpxchg volatile ptr %word, i32 0, i32 10 seq_cst monotonic
   %cmpxchg.7 = cmpxchg weak volatile i32* %word, i32 0, i32 11 syncscope("singlethread") seq_cst monotonic
-  ; CHECK: %cmpxchg.7 = cmpxchg weak volatile i32* %word, i32 0, i32 11 syncscope("singlethread") seq_cst monotonic
+  ; CHECK: %cmpxchg.7 = cmpxchg weak volatile ptr %word, i32 0, i32 11 syncscope("singlethread") seq_cst monotonic
   %atomicrmw.xchg = atomicrmw xchg i32* %word, i32 12 monotonic
-  ; CHECK: %atomicrmw.xchg = atomicrmw xchg i32* %word, i32 12 monotonic
+  ; CHECK: %atomicrmw.xchg = atomicrmw xchg ptr %word, i32 12 monotonic
   %atomicrmw.add = atomicrmw add i32* %word, i32 13 monotonic
-  ; CHECK: %atomicrmw.add = atomicrmw add i32* %word, i32 13 monotonic
+  ; CHECK: %atomicrmw.add = atomicrmw add ptr %word, i32 13 monotonic
   %atomicrmw.sub = atomicrmw sub i32* %word, i32 14 monotonic
-  ; CHECK: %atomicrmw.sub = atomicrmw sub i32* %word, i32 14 monotonic
+  ; CHECK: %atomicrmw.sub = atomicrmw sub ptr %word, i32 14 monotonic
   %atomicrmw.and = atomicrmw and i32* %word, i32 15 monotonic
-  ; CHECK: %atomicrmw.and = atomicrmw and i32* %word, i32 15 monotonic
+  ; CHECK: %atomicrmw.and = atomicrmw and ptr %word, i32 15 monotonic
   %atomicrmw.nand = atomicrmw nand i32* %word, i32 16 monotonic
-  ; CHECK: %atomicrmw.nand = atomicrmw nand i32* %word, i32 16 monotonic
+  ; CHECK: %atomicrmw.nand = atomicrmw nand ptr %word, i32 16 monotonic
   %atomicrmw.or = atomicrmw or i32* %word, i32 17 monotonic
-  ; CHECK: %atomicrmw.or = atomicrmw or i32* %word, i32 17 monotonic
+  ; CHECK: %atomicrmw.or = atomicrmw or ptr %word, i32 17 monotonic
   %atomicrmw.xor = atomicrmw xor i32* %word, i32 18 monotonic
-  ; CHECK: %atomicrmw.xor = atomicrmw xor i32* %word, i32 18 monotonic
+  ; CHECK: %atomicrmw.xor = atomicrmw xor ptr %word, i32 18 monotonic
   %atomicrmw.max = atomicrmw max i32* %word, i32 19 monotonic
-  ; CHECK: %atomicrmw.max = atomicrmw max i32* %word, i32 19 monotonic
+  ; CHECK: %atomicrmw.max = atomicrmw max ptr %word, i32 19 monotonic
   %atomicrmw.min = atomicrmw volatile min i32* %word, i32 20 monotonic
-  ; CHECK: %atomicrmw.min = atomicrmw volatile min i32* %word, i32 20 monotonic
+  ; CHECK: %atomicrmw.min = atomicrmw volatile min ptr %word, i32 20 monotonic
   %atomicrmw.umax = atomicrmw umax i32* %word, i32 21 syncscope("singlethread") monotonic
-  ; CHECK: %atomicrmw.umax = atomicrmw umax i32* %word, i32 21 syncscope("singlethread") monotonic
+  ; CHECK: %atomicrmw.umax = atomicrmw umax ptr %word, i32 21 syncscope("singlethread") monotonic
   %atomicrmw.umin = atomicrmw volatile umin i32* %word, i32 22 syncscope("singlethread") monotonic
-  ; CHECK: %atomicrmw.umin = atomicrmw volatile umin i32* %word, i32 22 syncscope("singlethread") monotonic
+  ; CHECK: %atomicrmw.umin = atomicrmw volatile umin ptr %word, i32 22 syncscope("singlethread") monotonic
   fence acquire
   ; CHECK: fence acquire
   fence release
@@ -630,18 +630,18 @@ define void @atomics(i32* %word) {
   ; CHECK: fence syncscope("singlethread") seq_cst
 
   %ld.1 = load atomic i32, i32* %word monotonic, align 4
-  ; CHECK: %ld.1 = load atomic i32, i32* %word monotonic, align 4
+  ; CHECK: %ld.1 = load atomic i32, ptr %word monotonic, align 4
   %ld.2 = load atomic volatile i32, i32* %word acquire, align 8
-  ; CHECK: %ld.2 = load atomic volatile i32, i32* %word acquire, align 8
+  ; CHECK: %ld.2 = load atomic volatile i32, ptr %word acquire, align 8
   %ld.3 = load atomic volatile i32, i32* %word syncscope("singlethread") seq_cst, align 16
-  ; CHECK: %ld.3 = load atomic volatile i32, i32* %word syncscope("singlethread") seq_cst, align 16
+  ; CHECK: %ld.3 = load atomic volatile i32, ptr %word syncscope("singlethread") seq_cst, align 16
 
   store atomic i32 23, i32* %word monotonic, align 4
-  ; CHECK: store atomic i32 23, i32* %word monotonic, align 4
+  ; CHECK: store atomic i32 23, ptr %word monotonic, align 4
   store atomic volatile i32 24, i32* %word monotonic, align 4
-  ; CHECK: store atomic volatile i32 24, i32* %word monotonic, align 4
+  ; CHECK: store atomic volatile i32 24, ptr %word monotonic, align 4
   store atomic volatile i32 25, i32* %word syncscope("singlethread") monotonic, align 4
-  ; CHECK: store atomic volatile i32 25, i32* %word syncscope("singlethread") monotonic, align 4
+  ; CHECK: store atomic volatile i32 25, ptr %word syncscope("singlethread") monotonic, align 4
   ret void
 }
 
@@ -664,17 +664,17 @@ define void @fastmathflags(float %op1, float %op2) {
 %opaquety = type opaque
 define void @typesystem() {
   %p0 = bitcast i8* null to i32 (i32)*
-  ; CHECK: %p0 = bitcast i8* null to i32 (i32)*
+  ; CHECK: %p0 = bitcast ptr null to ptr
   %p1 = bitcast i8* null to void (i8*)*
-  ; CHECK: %p1 = bitcast i8* null to void (i8*)*
+  ; CHECK: %p1 = bitcast ptr null to ptr
   %p2 = bitcast i8* null to i32 (i8*, ...)*
-  ; CHECK: %p2 = bitcast i8* null to i32 (i8*, ...)*
+  ; CHECK: %p2 = bitcast ptr null to ptr
   %p3 = bitcast i8* null to { i32, i8 } (i8*, ...)*
-  ; CHECK: %p3 = bitcast i8* null to { i32, i8 } (i8*, ...)*
+  ; CHECK: %p3 = bitcast ptr null to ptr
   %p4 = bitcast i8* null to <{ i32, i8 }> (i8*, ...)*
-  ; CHECK: %p4 = bitcast i8* null to <{ i32, i8 }> (i8*, ...)*
+  ; CHECK: %p4 = bitcast ptr null to ptr
   %p5 = bitcast i8* null to <{ i32, i8 }> (<{ i8*, i64 }>*, ...)*
-  ; CHECK: %p5 = bitcast i8* null to <{ i32, i8 }> (<{ i8*, i64 }>*, ...)*
+  ; CHECK: %p5 = bitcast ptr null to ptr
 
   %t0 = alloca i1942652
   ; CHECK: %t0 = alloca i1942652
@@ -693,7 +693,7 @@ define void @typesystem() {
   %t7 = alloca x86_mmx
   ; CHECK: %t7 = alloca x86_mmx
   %t8 = alloca %opaquety*
-  ; CHECK: %t8 = alloca %opaquety*
+  ; CHECK: %t8 = alloca ptr
 
   ret void
 }
@@ -739,9 +739,9 @@ defaultdest.1:
 defaultdest.2:
 
   indirectbr i8* blockaddress(@instructions.terminators, %defaultdest.2), [label %defaultdest.2]
-  ; CHECK: indirectbr i8* blockaddress(@instructions.terminators, %defaultdest.2), [label %defaultdest.2]
+  ; CHECK: indirectbr ptr blockaddress(@instructions.terminators, %defaultdest.2), [label %defaultdest.2]
   indirectbr i8* blockaddress(@instructions.terminators, %defaultdest.2), [label %defaultdest.2, label %defaultdest.2]
-  ; CHECK: indirectbr i8* blockaddress(@instructions.terminators, %defaultdest.2), [label %defaultdest.2, label %defaultdest.2]
+  ; CHECK: indirectbr ptr blockaddress(@instructions.terminators, %defaultdest.2), [label %defaultdest.2, label %defaultdest.2]
 
   invoke fastcc void @f.fastcc()
   ; CHECK: invoke fastcc void @f.fastcc()
@@ -878,17 +878,17 @@ define void @instructions.aggregateops({ i8, i32 } %up, <{ i8, i32 }> %p,
   %n.ptr = alloca { i8, { i32 } }
 
   getelementptr { i8, i32 }, { i8, i32 }* %up.ptr, i8 0
-  ; CHECK: getelementptr { i8, i32 }, { i8, i32 }* %up.ptr, i8 0
+  ; CHECK: getelementptr { i8, i32 }, ptr %up.ptr, i8 0
   getelementptr <{ i8, i32 }>, <{ i8, i32 }>* %p.ptr, i8 1
-  ; CHECK: getelementptr <{ i8, i32 }>, <{ i8, i32 }>* %p.ptr, i8 1
+  ; CHECK: getelementptr <{ i8, i32 }>, ptr %p.ptr, i8 1
   getelementptr [3 x i8], [3 x i8]* %arr.ptr, i8 2
-  ; CHECK: getelementptr [3 x i8], [3 x i8]* %arr.ptr, i8 2
+  ; CHECK: getelementptr [3 x i8], ptr %arr.ptr, i8 2
   getelementptr { i8, { i32 } }, { i8, { i32 } }* %n.ptr, i32 0, i32 1
-  ; CHECK: getelementptr { i8, { i32 } }, { i8, { i32 } }* %n.ptr, i32 0, i32 1
+  ; CHECK: getelementptr { i8, { i32 } }, ptr %n.ptr, i32 0, i32 1
   getelementptr inbounds { i8, { i32 } }, { i8, { i32 } }* %n.ptr, i32 1, i32 0
-  ; CHECK: getelementptr inbounds { i8, { i32 } }, { i8, { i32 } }* %n.ptr, i32 1, i32 0
+  ; CHECK: getelementptr inbounds { i8, { i32 } }, ptr %n.ptr, i32 1, i32 0
   getelementptr i8, <2 x i8*> %pvec, <2 x i64> %offsets
-  ; CHECK: getelementptr i8, <2 x i8*> %pvec, <2 x i64> %offsets
+  ; CHECK: getelementptr i8, <2 x ptr> %pvec, <2 x i64> %offsets
 
   ret void
 }
@@ -904,14 +904,14 @@ define void @instructions.memops(i32** %base) {
   ; CHECK: alloca inalloca i32, i8 4, align 4
 
   load i32*, i32** %base, align 8, !invariant.load !7, !nontemporal !8, !nonnull !7, !dereferenceable !9, !dereferenceable_or_null !9
-  ; CHECK: load i32*, i32** %base, align 8, !invariant.load !7, !nontemporal !8, !nonnull !7, !dereferenceable !9, !dereferenceable_or_null !9
+  ; CHECK: load ptr, ptr %base, align 8, !invariant.load !7, !nontemporal !8, !nonnull !7, !dereferenceable !9, !dereferenceable_or_null !9
   load volatile i32*, i32** %base, align 8, !invariant.load !7, !nontemporal !8, !nonnull !7, !dereferenceable !9, !dereferenceable_or_null !9
-  ; CHECK: load volatile i32*, i32** %base, align 8, !invariant.load !7, !nontemporal !8, !nonnull !7, !dereferenceable !9, !dereferenceable_or_null !9
+  ; CHECK: load volatile ptr, ptr %base, align 8, !invariant.load !7, !nontemporal !8, !nonnull !7, !dereferenceable !9, !dereferenceable_or_null !9
 
   store i32* null, i32** %base, align 4, !nontemporal !8
-  ; CHECK: store i32* null, i32** %base, align 4, !nontemporal !8
+  ; CHECK: store ptr null, ptr %base, align 4, !nontemporal !8
   store volatile i32* null, i32** %base, align 4, !nontemporal !8
-  ; CHECK: store volatile i32* null, i32** %base, align 4, !nontemporal !8
+  ; CHECK: store volatile ptr null, ptr %base, align 4, !nontemporal !8
 
   ret void
 }
@@ -937,13 +937,13 @@ define void @instructions.conversions() {
   sitofp i32 -1 to float
   ; CHECK: sitofp i32 -1 to float
   ptrtoint i8* null to i64
-  ; CHECK: ptrtoint i8* null to i64
+  ; CHECK: ptrtoint ptr null to i64
   inttoptr i64 0 to i8*
-  ; CHECK: inttoptr i64 0 to i8*
+  ; CHECK: inttoptr i64 0 to ptr
   bitcast i32 0 to i32
   ; CHECK: bitcast i32 0 to i32
   addrspacecast i32* null to i32 addrspace(1)*
-  ; CHECK: addrspacecast i32* null to i32 addrspace(1)*
+  ; CHECK: addrspacecast ptr null to ptr addrspace(1)
 
   ret void
 }
@@ -1025,16 +1025,16 @@ exit:
   ; CHECK: call void @f.nobuiltin() #39
 
   call fastcc noalias i32* @f.noalias() noinline
-  ; CHECK: call fastcc noalias i32* @f.noalias() #12
+  ; CHECK: call fastcc noalias ptr @f.noalias() #12
   tail call ghccc nonnull i32* @f.nonnull() minsize
-  ; CHECK: tail call ghccc nonnull i32* @f.nonnull() #7
+  ; CHECK: tail call ghccc nonnull ptr @f.nonnull() #7
 
   ret void
 }
 
 define void @instructions.call_musttail(i8* inalloca %val) {
   musttail call void @f.param.inalloca(i8* inalloca %val)
-  ; CHECK: musttail call void @f.param.inalloca(i8* inalloca(i8) %val)
+  ; CHECK: musttail call void @f.param.inalloca(ptr inalloca(i8) %val)
 
   ret void
 }
@@ -1058,7 +1058,7 @@ catch2:
              cleanup
              ; CHECK: cleanup
              catch i32* null
-             ; CHECK: catch i32* null
+             ; CHECK: catch ptr null
   br label %proceed
 
 catch3:
@@ -1067,9 +1067,9 @@ catch3:
              cleanup
              ; CHECK: cleanup
              catch i32* null
-             ; CHECK: catch i32* null
+             ; CHECK: catch ptr null
              catch i32* null
-             ; CHECK: catch i32* null
+             ; CHECK: catch ptr null
   br label %proceed
 
 catch4:
@@ -1094,16 +1094,16 @@ define void @instructions.va_arg(i8* %v, ...) {
   %ap2 = bitcast i8** %ap to i8*
 
   call void @llvm.va_start(i8* %ap2)
-  ; CHECK: call void @llvm.va_start(i8* %ap2)
+  ; CHECK: call void @llvm.va_start(ptr %ap2)
 
   va_arg i8* %ap2, i32
-  ; CHECK: va_arg i8* %ap2, i32
+  ; CHECK: va_arg ptr %ap2, i32
 
   call void @llvm.va_copy(i8* %v, i8* %ap2)
-  ; CHECK: call void @llvm.va_copy(i8* %v, i8* %ap2)
+  ; CHECK: call void @llvm.va_copy(ptr %v, ptr %ap2)
 
   call void @llvm.va_end(i8* %ap2)
-  ; CHECK: call void @llvm.va_end(i8* %ap2)
+  ; CHECK: call void @llvm.va_end(ptr %ap2)
 
   ret void
 }
@@ -1115,14 +1115,14 @@ declare void @llvm.gcwrite(i8*, i8*, i8**)
 define void @intrinsics.gc() gc "shadow-stack" {
   %ptrloc = alloca i8*
   call void @llvm.gcroot(i8** %ptrloc, i8* null)
-  ; CHECK: call void @llvm.gcroot(i8** %ptrloc, i8* null)
+  ; CHECK: call void @llvm.gcroot(ptr %ptrloc, ptr null)
 
   call i8* @llvm.gcread(i8* null, i8** %ptrloc)
-  ; CHECK: call i8* @llvm.gcread(i8* null, i8** %ptrloc)
+  ; CHECK: call ptr @llvm.gcread(ptr null, ptr %ptrloc)
 
   %ref = alloca i8
   call void @llvm.gcwrite(i8* %ref, i8* null, i8** %ptrloc)
-  ; CHECK: call void @llvm.gcwrite(i8* %ref, i8* null, i8** %ptrloc)
+  ; CHECK: call void @llvm.gcwrite(ptr %ref, ptr null, ptr %ptrloc)
 
   ret void
 }
@@ -1145,9 +1145,9 @@ declare void @llvm.instrprof_increment(i8*, i64, i32, i32)
 !10 = !{!"rax"}
 define void @intrinsics.codegen() {
   call i8* @llvm.returnaddress(i32 1)
-  ; CHECK: call i8* @llvm.returnaddress(i32 1)
+  ; CHECK: call ptr @llvm.returnaddress(i32 1)
   call i8* @llvm.frameaddress(i32 1)
-  ; CHECK: call i8* @llvm.frameaddress.p0i8(i32 1)
+  ; CHECK: call ptr @llvm.frameaddress.p0(i32 1)
 
   call i32 @llvm.read_register.i32(metadata !10)
   ; CHECK: call i32 @llvm.read_register.i32(metadata !10)
@@ -1159,12 +1159,12 @@ define void @intrinsics.codegen() {
   ; CHECK: call void @llvm.write_register.i64(metadata !10, i64 0)
 
   %stack = call i8* @llvm.stacksave()
-  ; CHECK: %stack = call i8* @llvm.stacksave()
+  ; CHECK: %stack = call ptr @llvm.stacksave()
   call void @llvm.stackrestore(i8* %stack)
-  ; CHECK: call void @llvm.stackrestore(i8* %stack)
+  ; CHECK: call void @llvm.stackrestore(ptr %stack)
 
   call void @llvm.prefetch(i8* %stack, i32 0, i32 3, i32 0)
-  ; CHECK: call void @llvm.prefetch.p0i8(i8* %stack, i32 0, i32 3, i32 0)
+  ; CHECK: call void @llvm.prefetch.p0(ptr %stack, i32 0, i32 3, i32 0)
 
   call void @llvm.pcmarker(i32 1)
   ; CHECK: call void @llvm.pcmarker(i32 1)
@@ -1173,10 +1173,10 @@ define void @intrinsics.codegen() {
   ; CHECK: call i64 @llvm.readcyclecounter()
 
   call void @llvm.clear_cache(i8* null, i8* null)
-  ; CHECK: call void @llvm.clear_cache(i8* null, i8* null)
+  ; CHECK: call void @llvm.clear_cache(ptr null, ptr null)
 
   call void @llvm.instrprof_increment(i8* null, i64 0, i32 0, i32 0)
-  ; CHECK: call void @llvm.instrprof_increment(i8* null, i64 0, i32 0, i32 0)
+  ; CHECK: call void @llvm.instrprof_increment(ptr null, i64 0, i32 0, i32 0)
 
   ret void
 }
@@ -1186,7 +1186,7 @@ declare i8* @llvm.localrecover(i8* %func, i8* %fp, i32 %idx)
 define void @intrinsics.localescape() {
   %static.alloca = alloca i32
   call void (...) @llvm.localescape(i32* %static.alloca)
-  ; CHECK: call void (...) @llvm.localescape(i32* %static.alloca)
+  ; CHECK: call void (...) @llvm.localescape(ptr %static.alloca)
 
   call void @intrinsics.localrecover()
 
@@ -1196,7 +1196,7 @@ define void @intrinsics.localrecover() {
   %func = bitcast void ()* @intrinsics.localescape to i8*
   %fp = call i8* @llvm.frameaddress(i32 1)
   call i8* @llvm.localrecover(i8* %func, i8* %fp, i32 0)
-  ; CHECK: call i8* @llvm.localrecover(i8* %func, i8* %fp, i32 0)
+  ; CHECK: call ptr @llvm.localrecover(ptr %func, ptr %fp, i32 0)
 
   ret void
 }

diff  --git a/llvm/test/Bitcode/compatibility-3.8.ll b/llvm/test/Bitcode/compatibility-3.8.ll
index d4755d5aec236..2d87fbb2f3148 100644
--- a/llvm/test/Bitcode/compatibility-3.8.ll
+++ b/llvm/test/Bitcode/compatibility-3.8.ll
@@ -37,7 +37,7 @@ $comdat.samesize = comdat samesize
 @const.float = constant double 0.0
 ; CHECK: @const.float = constant double 0.0
 @const.null = constant i8* null
-; CHECK: @const.null = constant i8* null
+; CHECK: @const.null = constant ptr null
 %const.struct.type = type { i32, i8 }
 %const.struct.type.packed = type <{ i32, i8 }>
 @const.struct = constant %const.struct.type { i32 -1, i8 undef }
@@ -186,13 +186,13 @@ $comdat2 = comdat any
 @g.used3 = global i8 0
 declare void @g.f1()
 @llvm.used = appending global [1 x i32*] [i32* @g.used1], section "llvm.metadata"
-; CHECK: @llvm.used = appending global [1 x i32*] [i32* @g.used1], section "llvm.metadata"
+; CHECK: @llvm.used = appending global [1 x ptr] [ptr @g.used1], section "llvm.metadata"
 @llvm.compiler.used = appending global [1 x i32*] [i32* @g.used2], section "llvm.metadata"
-; CHECK: @llvm.compiler.used = appending global [1 x i32*] [i32* @g.used2], section "llvm.metadata"
+; CHECK: @llvm.compiler.used = appending global [1 x ptr] [ptr @g.used2], section "llvm.metadata"
 @llvm.global_ctors = appending global [1 x %pri.func.data] [%pri.func.data { i32 0, void ()* @g.f1, i8* @g.used3 }], section "llvm.metadata"
-; CHECK: @llvm.global_ctors = appending global [1 x %pri.func.data] [%pri.func.data { i32 0, void ()* @g.f1, i8* @g.used3 }], section "llvm.metadata"
+; CHECK: @llvm.global_ctors = appending global [1 x %pri.func.data] [%pri.func.data { i32 0, ptr @g.f1, ptr @g.used3 }], section "llvm.metadata"
 @llvm.global_dtors = appending global [1 x %pri.func.data] [%pri.func.data { i32 0, void ()* @g.f1, i8* @g.used3 }], section "llvm.metadata"
-; CHECK: @llvm.global_dtors = appending global [1 x %pri.func.data] [%pri.func.data { i32 0, void ()* @g.f1, i8* @g.used3 }], section "llvm.metadata"
+; CHECK: @llvm.global_dtors = appending global [1 x %pri.func.data] [%pri.func.data { i32 0, ptr @g.f1, ptr @g.used3 }], section "llvm.metadata"
 
 ;; Aliases
 ; Format: @<Name> = [Linkage] [Visibility] [DLLStorageClass] [ThreadLocal]
@@ -200,51 +200,51 @@ declare void @g.f1()
 
 ; Aliases -- Linkage
 @a.private = private alias i32, i32* @g.private
-; CHECK: @a.private = private alias i32, i32* @g.private
+; CHECK: @a.private = private alias i32, ptr @g.private
 @a.internal = internal alias i32, i32* @g.internal
-; CHECK: @a.internal = internal alias i32, i32* @g.internal
+; CHECK: @a.internal = internal alias i32, ptr @g.internal
 @a.linkonce = linkonce alias i32, i32* @g.linkonce
-; CHECK: @a.linkonce = linkonce alias i32, i32* @g.linkonce
+; CHECK: @a.linkonce = linkonce alias i32, ptr @g.linkonce
 @a.weak = weak alias i32, i32* @g.weak
-; CHECK: @a.weak = weak alias i32, i32* @g.weak
+; CHECK: @a.weak = weak alias i32, ptr @g.weak
 @a.linkonce_odr = linkonce_odr alias i32, i32* @g.linkonce_odr
-; CHECK: @a.linkonce_odr = linkonce_odr alias i32, i32* @g.linkonce_odr
+; CHECK: @a.linkonce_odr = linkonce_odr alias i32, ptr @g.linkonce_odr
 @a.weak_odr = weak_odr alias i32, i32* @g.weak_odr
-; CHECK: @a.weak_odr = weak_odr alias i32, i32* @g.weak_odr
+; CHECK: @a.weak_odr = weak_odr alias i32, ptr @g.weak_odr
 @a.external = external alias i32, i32* @g1
-; CHECK: @a.external = alias i32, i32* @g1
+; CHECK: @a.external = alias i32, ptr @g1
 
 ; Aliases -- Visibility
 @a.default = default alias i32, i32* @g.default
-; CHECK: @a.default = alias i32, i32* @g.default
+; CHECK: @a.default = alias i32, ptr @g.default
 @a.hidden = hidden alias i32, i32* @g.hidden
-; CHECK: @a.hidden = hidden alias i32, i32* @g.hidden
+; CHECK: @a.hidden = hidden alias i32, ptr @g.hidden
 @a.protected = protected alias i32, i32* @g.protected
-; CHECK: @a.protected = protected alias i32, i32* @g.protected
+; CHECK: @a.protected = protected alias i32, ptr @g.protected
 
 ; Aliases -- DLLStorageClass
 @a.dlldefault = default alias i32, i32* @g.dlldefault
-; CHECK: @a.dlldefault = alias i32, i32* @g.dlldefault
+; CHECK: @a.dlldefault = alias i32, ptr @g.dlldefault
 @a.dllimport = dllimport alias i32, i32* @g1
-; CHECK: @a.dllimport = dllimport alias i32, i32* @g1
+; CHECK: @a.dllimport = dllimport alias i32, ptr @g1
 @a.dllexport = dllexport alias i32, i32* @g.dllexport
-; CHECK: @a.dllexport = dllexport alias i32, i32* @g.dllexport
+; CHECK: @a.dllexport = dllexport alias i32, ptr @g.dllexport
 
 ; Aliases -- ThreadLocal
 @a.notthreadlocal = alias i32, i32* @g.notthreadlocal
-; CHECK: @a.notthreadlocal = alias i32, i32* @g.notthreadlocal
+; CHECK: @a.notthreadlocal = alias i32, ptr @g.notthreadlocal
 @a.generaldynamic = thread_local alias i32, i32* @g.generaldynamic
-; CHECK: @a.generaldynamic = thread_local alias i32, i32* @g.generaldynamic
+; CHECK: @a.generaldynamic = thread_local alias i32, ptr @g.generaldynamic
 @a.localdynamic = thread_local(localdynamic) alias i32, i32* @g.localdynamic
-; CHECK: @a.localdynamic = thread_local(localdynamic) alias i32, i32* @g.localdynamic
+; CHECK: @a.localdynamic = thread_local(localdynamic) alias i32, ptr @g.localdynamic
 @a.initialexec = thread_local(initialexec) alias i32, i32* @g.initialexec
-; CHECK: @a.initialexec = thread_local(initialexec) alias i32, i32* @g.initialexec
+; CHECK: @a.initialexec = thread_local(initialexec) alias i32, ptr @g.initialexec
 @a.localexec = thread_local(localexec) alias i32, i32* @g.localexec
-; CHECK: @a.localexec = thread_local(localexec) alias i32, i32* @g.localexec
+; CHECK: @a.localexec = thread_local(localexec) alias i32, ptr @g.localexec
 
 ; Aliases -- unnamed_addr
 @a.unnamed_addr = unnamed_addr alias i32, i32* @g.unnamed_addr
-; CHECK: @a.unnamed_addr = unnamed_addr alias i32, i32* @g.unnamed_addr
+; CHECK: @a.unnamed_addr = unnamed_addr alias i32, ptr @g.unnamed_addr
 
 ;; Functions
 ; Format: define [linkage] [visibility] [DLLStorageClass]
@@ -409,23 +409,23 @@ declare zeroext i64 @f.zeroext()
 declare signext i64 @f.signext()
 ; CHECK: declare signext i64 @f.signext()
 declare inreg i32* @f.inreg()
-; CHECK: declare inreg i32* @f.inreg()
+; CHECK: declare inreg ptr @f.inreg()
 declare noalias i32* @f.noalias()
-; CHECK: declare noalias i32* @f.noalias()
+; CHECK: declare noalias ptr @f.noalias()
 declare nonnull i32* @f.nonnull()
-; CHECK: declare nonnull i32* @f.nonnull()
+; CHECK: declare nonnull ptr @f.nonnull()
 declare dereferenceable(4) i32* @f.dereferenceable4()
-; CHECK: declare dereferenceable(4) i32* @f.dereferenceable4()
+; CHECK: declare dereferenceable(4) ptr @f.dereferenceable4()
 declare dereferenceable(8) i32* @f.dereferenceable8()
-; CHECK: declare dereferenceable(8) i32* @f.dereferenceable8()
+; CHECK: declare dereferenceable(8) ptr @f.dereferenceable8()
 declare dereferenceable(16) i32* @f.dereferenceable16()
-; CHECK: declare dereferenceable(16) i32* @f.dereferenceable16()
+; CHECK: declare dereferenceable(16) ptr @f.dereferenceable16()
 declare dereferenceable_or_null(4) i32* @f.dereferenceable4_or_null()
-; CHECK: declare dereferenceable_or_null(4) i32* @f.dereferenceable4_or_null()
+; CHECK: declare dereferenceable_or_null(4) ptr @f.dereferenceable4_or_null()
 declare dereferenceable_or_null(8) i32* @f.dereferenceable8_or_null()
-; CHECK: declare dereferenceable_or_null(8) i32* @f.dereferenceable8_or_null()
+; CHECK: declare dereferenceable_or_null(8) ptr @f.dereferenceable8_or_null()
 declare dereferenceable_or_null(16) i32* @f.dereferenceable16_or_null()
-; CHECK: declare dereferenceable_or_null(16) i32* @f.dereferenceable16_or_null()
+; CHECK: declare dereferenceable_or_null(16) ptr @f.dereferenceable16_or_null()
 
 ; Functions -- Parameter attributes
 declare void @f.param.zeroext(i8 zeroext)
@@ -435,25 +435,25 @@ declare void @f.param.signext(i8 signext)
 declare void @f.param.inreg(i8 inreg)
 ; CHECK: declare void @f.param.inreg(i8 inreg)
 declare void @f.param.byval({ i8, i8 }* byval({ i8, i8 }))
-; CHECK: declare void @f.param.byval({ i8, i8 }* byval({ i8, i8 }))
+; CHECK: declare void @f.param.byval(ptr byval({ i8, i8 }))
 declare void @f.param.inalloca(i8* inalloca)
-; CHECK: declare void @f.param.inalloca(i8* inalloca(i8))
+; CHECK: declare void @f.param.inalloca(ptr inalloca(i8))
 declare void @f.param.sret(i8* sret(i8))
-; CHECK: declare void @f.param.sret(i8* sret(i8))
+; CHECK: declare void @f.param.sret(ptr sret(i8))
 declare void @f.param.noalias(i8* noalias)
-; CHECK: declare void @f.param.noalias(i8* noalias)
+; CHECK: declare void @f.param.noalias(ptr noalias)
 declare void @f.param.nocapture(i8* nocapture)
-; CHECK: declare void @f.param.nocapture(i8* nocapture)
+; CHECK: declare void @f.param.nocapture(ptr nocapture)
 declare void @f.param.nest(i8* nest)
-; CHECK: declare void @f.param.nest(i8* nest)
+; CHECK: declare void @f.param.nest(ptr nest)
 declare i8* @f.param.returned(i8* returned)
-; CHECK: declare i8* @f.param.returned(i8* returned)
+; CHECK: declare ptr @f.param.returned(ptr returned)
 declare void @f.param.nonnull(i8* nonnull)
-; CHECK: declare void @f.param.nonnull(i8* nonnull)
+; CHECK: declare void @f.param.nonnull(ptr nonnull)
 declare void @f.param.dereferenceable(i8* dereferenceable(4))
-; CHECK: declare void @f.param.dereferenceable(i8* dereferenceable(4))
+; CHECK: declare void @f.param.dereferenceable(ptr dereferenceable(4))
 declare void @f.param.dereferenceable_or_null(i8* dereferenceable_or_null(4))
-; CHECK: declare void @f.param.dereferenceable_or_null(i8* dereferenceable_or_null(4))
+; CHECK: declare void @f.param.dereferenceable_or_null(ptr dereferenceable_or_null(4))
 
 ; Functions -- unnamed_addr
 declare void @f.unnamed_addr() unnamed_addr
@@ -602,7 +602,7 @@ normal:
 declare i32 @f.personality_handler()
 ; CHECK: declare i32 @f.personality_handler()
 define void @f.personality() personality i32 ()* @f.personality_handler {
-; CHECK: define void @f.personality() personality i32 ()* @f.personality_handler
+; CHECK: define void @f.personality() personality ptr @f.personality_handler
   invoke void @llvm.donothing() to label %normal unwind label %exception
 exception:
   %cleanup = landingpad i32 cleanup
@@ -614,43 +614,43 @@ normal:
 ;; Atomic Memory Ordering Constraints
 define void @atomics(i32* %word) {
   %cmpxchg.0 = cmpxchg i32* %word, i32 0, i32 4 monotonic monotonic
-  ; CHECK: %cmpxchg.0 = cmpxchg i32* %word, i32 0, i32 4 monotonic monotonic
+  ; CHECK: %cmpxchg.0 = cmpxchg ptr %word, i32 0, i32 4 monotonic monotonic
   %cmpxchg.1 = cmpxchg i32* %word, i32 0, i32 5 acq_rel monotonic
-  ; CHECK: %cmpxchg.1 = cmpxchg i32* %word, i32 0, i32 5 acq_rel monotonic
+  ; CHECK: %cmpxchg.1 = cmpxchg ptr %word, i32 0, i32 5 acq_rel monotonic
   %cmpxchg.2 = cmpxchg i32* %word, i32 0, i32 6 acquire monotonic
-  ; CHECK: %cmpxchg.2 = cmpxchg i32* %word, i32 0, i32 6 acquire monotonic
+  ; CHECK: %cmpxchg.2 = cmpxchg ptr %word, i32 0, i32 6 acquire monotonic
   %cmpxchg.3 = cmpxchg i32* %word, i32 0, i32 7 release monotonic
-  ; CHECK: %cmpxchg.3 = cmpxchg i32* %word, i32 0, i32 7 release monotonic
+  ; CHECK: %cmpxchg.3 = cmpxchg ptr %word, i32 0, i32 7 release monotonic
   %cmpxchg.4 = cmpxchg i32* %word, i32 0, i32 8 seq_cst monotonic
-  ; CHECK: %cmpxchg.4 = cmpxchg i32* %word, i32 0, i32 8 seq_cst monotonic
+  ; CHECK: %cmpxchg.4 = cmpxchg ptr %word, i32 0, i32 8 seq_cst monotonic
   %cmpxchg.5 = cmpxchg weak i32* %word, i32 0, i32 9 seq_cst monotonic
-  ; CHECK: %cmpxchg.5 = cmpxchg weak i32* %word, i32 0, i32 9 seq_cst monotonic
+  ; CHECK: %cmpxchg.5 = cmpxchg weak ptr %word, i32 0, i32 9 seq_cst monotonic
   %cmpxchg.6 = cmpxchg volatile i32* %word, i32 0, i32 10 seq_cst monotonic
-  ; CHECK: %cmpxchg.6 = cmpxchg volatile i32* %word, i32 0, i32 10 seq_cst monotonic
+  ; CHECK: %cmpxchg.6 = cmpxchg volatile ptr %word, i32 0, i32 10 seq_cst monotonic
   %cmpxchg.7 = cmpxchg weak volatile i32* %word, i32 0, i32 11 syncscope("singlethread") seq_cst monotonic
-  ; CHECK: %cmpxchg.7 = cmpxchg weak volatile i32* %word, i32 0, i32 11 syncscope("singlethread") seq_cst monotonic
+  ; CHECK: %cmpxchg.7 = cmpxchg weak volatile ptr %word, i32 0, i32 11 syncscope("singlethread") seq_cst monotonic
   %atomicrmw.xchg = atomicrmw xchg i32* %word, i32 12 monotonic
-  ; CHECK: %atomicrmw.xchg = atomicrmw xchg i32* %word, i32 12 monotonic
+  ; CHECK: %atomicrmw.xchg = atomicrmw xchg ptr %word, i32 12 monotonic
   %atomicrmw.add = atomicrmw add i32* %word, i32 13 monotonic
-  ; CHECK: %atomicrmw.add = atomicrmw add i32* %word, i32 13 monotonic
+  ; CHECK: %atomicrmw.add = atomicrmw add ptr %word, i32 13 monotonic
   %atomicrmw.sub = atomicrmw sub i32* %word, i32 14 monotonic
-  ; CHECK: %atomicrmw.sub = atomicrmw sub i32* %word, i32 14 monotonic
+  ; CHECK: %atomicrmw.sub = atomicrmw sub ptr %word, i32 14 monotonic
   %atomicrmw.and = atomicrmw and i32* %word, i32 15 monotonic
-  ; CHECK: %atomicrmw.and = atomicrmw and i32* %word, i32 15 monotonic
+  ; CHECK: %atomicrmw.and = atomicrmw and ptr %word, i32 15 monotonic
   %atomicrmw.nand = atomicrmw nand i32* %word, i32 16 monotonic
-  ; CHECK: %atomicrmw.nand = atomicrmw nand i32* %word, i32 16 monotonic
+  ; CHECK: %atomicrmw.nand = atomicrmw nand ptr %word, i32 16 monotonic
   %atomicrmw.or = atomicrmw or i32* %word, i32 17 monotonic
-  ; CHECK: %atomicrmw.or = atomicrmw or i32* %word, i32 17 monotonic
+  ; CHECK: %atomicrmw.or = atomicrmw or ptr %word, i32 17 monotonic
   %atomicrmw.xor = atomicrmw xor i32* %word, i32 18 monotonic
-  ; CHECK: %atomicrmw.xor = atomicrmw xor i32* %word, i32 18 monotonic
+  ; CHECK: %atomicrmw.xor = atomicrmw xor ptr %word, i32 18 monotonic
   %atomicrmw.max = atomicrmw max i32* %word, i32 19 monotonic
-  ; CHECK: %atomicrmw.max = atomicrmw max i32* %word, i32 19 monotonic
+  ; CHECK: %atomicrmw.max = atomicrmw max ptr %word, i32 19 monotonic
   %atomicrmw.min = atomicrmw volatile min i32* %word, i32 20 monotonic
-  ; CHECK: %atomicrmw.min = atomicrmw volatile min i32* %word, i32 20 monotonic
+  ; CHECK: %atomicrmw.min = atomicrmw volatile min ptr %word, i32 20 monotonic
   %atomicrmw.umax = atomicrmw umax i32* %word, i32 21 syncscope("singlethread") monotonic
-  ; CHECK: %atomicrmw.umax = atomicrmw umax i32* %word, i32 21 syncscope("singlethread") monotonic
+  ; CHECK: %atomicrmw.umax = atomicrmw umax ptr %word, i32 21 syncscope("singlethread") monotonic
   %atomicrmw.umin = atomicrmw volatile umin i32* %word, i32 22 syncscope("singlethread") monotonic
-  ; CHECK: %atomicrmw.umin = atomicrmw volatile umin i32* %word, i32 22 syncscope("singlethread") monotonic
+  ; CHECK: %atomicrmw.umin = atomicrmw volatile umin ptr %word, i32 22 syncscope("singlethread") monotonic
   fence acquire
   ; CHECK: fence acquire
   fence release
@@ -661,18 +661,18 @@ define void @atomics(i32* %word) {
   ; CHECK: fence syncscope("singlethread") seq_cst
 
   %ld.1 = load atomic i32, i32* %word monotonic, align 4
-  ; CHECK: %ld.1 = load atomic i32, i32* %word monotonic, align 4
+  ; CHECK: %ld.1 = load atomic i32, ptr %word monotonic, align 4
   %ld.2 = load atomic volatile i32, i32* %word acquire, align 8
-  ; CHECK: %ld.2 = load atomic volatile i32, i32* %word acquire, align 8
+  ; CHECK: %ld.2 = load atomic volatile i32, ptr %word acquire, align 8
   %ld.3 = load atomic volatile i32, i32* %word syncscope("singlethread") seq_cst, align 16
-  ; CHECK: %ld.3 = load atomic volatile i32, i32* %word syncscope("singlethread") seq_cst, align 16
+  ; CHECK: %ld.3 = load atomic volatile i32, ptr %word syncscope("singlethread") seq_cst, align 16
 
   store atomic i32 23, i32* %word monotonic, align 4
-  ; CHECK: store atomic i32 23, i32* %word monotonic, align 4
+  ; CHECK: store atomic i32 23, ptr %word monotonic, align 4
   store atomic volatile i32 24, i32* %word monotonic, align 4
-  ; CHECK: store atomic volatile i32 24, i32* %word monotonic, align 4
+  ; CHECK: store atomic volatile i32 24, ptr %word monotonic, align 4
   store atomic volatile i32 25, i32* %word syncscope("singlethread") monotonic, align 4
-  ; CHECK: store atomic volatile i32 25, i32* %word syncscope("singlethread") monotonic, align 4
+  ; CHECK: store atomic volatile i32 25, ptr %word syncscope("singlethread") monotonic, align 4
   ret void
 }
 
@@ -717,17 +717,17 @@ define void @fastMathFlagsForCalls(float %f, double %d1, <4 x double> %d2) {
 %opaquety = type opaque
 define void @typesystem() {
   %p0 = bitcast i8* null to i32 (i32)*
-  ; CHECK: %p0 = bitcast i8* null to i32 (i32)*
+  ; CHECK: %p0 = bitcast ptr null to ptr
   %p1 = bitcast i8* null to void (i8*)*
-  ; CHECK: %p1 = bitcast i8* null to void (i8*)*
+  ; CHECK: %p1 = bitcast ptr null to ptr
   %p2 = bitcast i8* null to i32 (i8*, ...)*
-  ; CHECK: %p2 = bitcast i8* null to i32 (i8*, ...)*
+  ; CHECK: %p2 = bitcast ptr null to ptr
   %p3 = bitcast i8* null to { i32, i8 } (i8*, ...)*
-  ; CHECK: %p3 = bitcast i8* null to { i32, i8 } (i8*, ...)*
+  ; CHECK: %p3 = bitcast ptr null to ptr
   %p4 = bitcast i8* null to <{ i32, i8 }> (i8*, ...)*
-  ; CHECK: %p4 = bitcast i8* null to <{ i32, i8 }> (i8*, ...)*
+  ; CHECK: %p4 = bitcast ptr null to ptr
   %p5 = bitcast i8* null to <{ i32, i8 }> (<{ i8*, i64 }>*, ...)*
-  ; CHECK: %p5 = bitcast i8* null to <{ i32, i8 }> (<{ i8*, i64 }>*, ...)*
+  ; CHECK: %p5 = bitcast ptr null to ptr
 
   %t0 = alloca i1942652
   ; CHECK: %t0 = alloca i1942652
@@ -746,7 +746,7 @@ define void @typesystem() {
   %t7 = alloca x86_mmx
   ; CHECK: %t7 = alloca x86_mmx
   %t8 = alloca %opaquety*
-  ; CHECK: %t8 = alloca %opaquety*
+  ; CHECK: %t8 = alloca ptr
 
   ret void
 }
@@ -795,9 +795,9 @@ defaultdest.1:
 defaultdest.2:
 
   indirectbr i8* blockaddress(@instructions.terminators, %defaultdest.2), [label %defaultdest.2]
-  ; CHECK: indirectbr i8* blockaddress(@instructions.terminators, %defaultdest.2), [label %defaultdest.2]
+  ; CHECK: indirectbr ptr blockaddress(@instructions.terminators, %defaultdest.2), [label %defaultdest.2]
   indirectbr i8* blockaddress(@instructions.terminators, %defaultdest.2), [label %defaultdest.2, label %defaultdest.2]
-  ; CHECK: indirectbr i8* blockaddress(@instructions.terminators, %defaultdest.2), [label %defaultdest.2, label %defaultdest.2]
+  ; CHECK: indirectbr ptr blockaddress(@instructions.terminators, %defaultdest.2), [label %defaultdest.2, label %defaultdest.2]
 
   invoke fastcc void @f.fastcc()
   ; CHECK: invoke fastcc void @f.fastcc()
@@ -837,7 +837,7 @@ catchswitch2:
 catchpad2:
   catchpad within %cs2 [i32* %arg1]
   br label %normal
-  ; CHECK: catchpad within %cs2 [i32* %arg1]
+  ; CHECK: catchpad within %cs2 [ptr %arg1]
   ; CHECK-NEXT: br label %normal
 
 catchswitch3:
@@ -846,7 +846,7 @@ catchswitch3:
 catchpad3:
   catchpad within %cs3 [i32* %arg1, i32* %arg2]
   br label %normal
-  ; CHECK: catchpad within %cs3 [i32* %arg1, i32* %arg2]
+  ; CHECK: catchpad within %cs3 [ptr %arg1, ptr %arg2]
   ; CHECK-NEXT: br label %normal
 
 cleanuppad1:
@@ -1026,17 +1026,17 @@ define void @instructions.aggregateops({ i8, i32 } %up, <{ i8, i32 }> %p,
   %n.ptr = alloca { i8, { i32 } }
 
   getelementptr { i8, i32 }, { i8, i32 }* %up.ptr, i8 0
-  ; CHECK: getelementptr { i8, i32 }, { i8, i32 }* %up.ptr, i8 0
+  ; CHECK: getelementptr { i8, i32 }, ptr %up.ptr, i8 0
   getelementptr <{ i8, i32 }>, <{ i8, i32 }>* %p.ptr, i8 1
-  ; CHECK: getelementptr <{ i8, i32 }>, <{ i8, i32 }>* %p.ptr, i8 1
+  ; CHECK: getelementptr <{ i8, i32 }>, ptr %p.ptr, i8 1
   getelementptr [3 x i8], [3 x i8]* %arr.ptr, i8 2
-  ; CHECK: getelementptr [3 x i8], [3 x i8]* %arr.ptr, i8 2
+  ; CHECK: getelementptr [3 x i8], ptr %arr.ptr, i8 2
   getelementptr { i8, { i32 } }, { i8, { i32 } }* %n.ptr, i32 0, i32 1
-  ; CHECK: getelementptr { i8, { i32 } }, { i8, { i32 } }* %n.ptr, i32 0, i32 1
+  ; CHECK: getelementptr { i8, { i32 } }, ptr %n.ptr, i32 0, i32 1
   getelementptr inbounds { i8, { i32 } }, { i8, { i32 } }* %n.ptr, i32 1, i32 0
-  ; CHECK: getelementptr inbounds { i8, { i32 } }, { i8, { i32 } }* %n.ptr, i32 1, i32 0
+  ; CHECK: getelementptr inbounds { i8, { i32 } }, ptr %n.ptr, i32 1, i32 0
   getelementptr i8, <2 x i8*> %pvec, <2 x i64> %offsets
-  ; CHECK: getelementptr i8, <2 x i8*> %pvec, <2 x i64> %offsets
+  ; CHECK: getelementptr i8, <2 x ptr> %pvec, <2 x i64> %offsets
 
   ret void
 }
@@ -1052,14 +1052,14 @@ define void @instructions.memops(i32** %base) {
   ; CHECK: alloca inalloca i32, i8 4, align 4
 
   load i32*, i32** %base, align 8, !invariant.load !7, !nontemporal !8, !nonnull !7, !dereferenceable !9, !dereferenceable_or_null !9
-  ; CHECK: load i32*, i32** %base, align 8, !invariant.load !7, !nontemporal !8, !nonnull !7, !dereferenceable !9, !dereferenceable_or_null !9
+  ; CHECK: load ptr, ptr %base, align 8, !invariant.load !7, !nontemporal !8, !nonnull !7, !dereferenceable !9, !dereferenceable_or_null !9
   load volatile i32*, i32** %base, align 8, !invariant.load !7, !nontemporal !8, !nonnull !7, !dereferenceable !9, !dereferenceable_or_null !9
-  ; CHECK: load volatile i32*, i32** %base, align 8, !invariant.load !7, !nontemporal !8, !nonnull !7, !dereferenceable !9, !dereferenceable_or_null !9
+  ; CHECK: load volatile ptr, ptr %base, align 8, !invariant.load !7, !nontemporal !8, !nonnull !7, !dereferenceable !9, !dereferenceable_or_null !9
 
   store i32* null, i32** %base, align 4, !nontemporal !8
-  ; CHECK: store i32* null, i32** %base, align 4, !nontemporal !8
+  ; CHECK: store ptr null, ptr %base, align 4, !nontemporal !8
   store volatile i32* null, i32** %base, align 4, !nontemporal !8
-  ; CHECK: store volatile i32* null, i32** %base, align 4, !nontemporal !8
+  ; CHECK: store volatile ptr null, ptr %base, align 4, !nontemporal !8
 
   ret void
 }
@@ -1085,13 +1085,13 @@ define void @instructions.conversions() {
   sitofp i32 -1 to float
   ; CHECK: sitofp i32 -1 to float
   ptrtoint i8* null to i64
-  ; CHECK: ptrtoint i8* null to i64
+  ; CHECK: ptrtoint ptr null to i64
   inttoptr i64 0 to i8*
-  ; CHECK: inttoptr i64 0 to i8*
+  ; CHECK: inttoptr i64 0 to ptr
   bitcast i32 0 to i32
   ; CHECK: bitcast i32 0 to i32
   addrspacecast i32* null to i32 addrspace(1)*
-  ; CHECK: addrspacecast i32* null to i32 addrspace(1)*
+  ; CHECK: addrspacecast ptr null to ptr addrspace(1)
 
   ret void
 }
@@ -1173,16 +1173,16 @@ exit:
   ; CHECK: call void @f.nobuiltin() #42
 
   call fastcc noalias i32* @f.noalias() noinline
-  ; CHECK: call fastcc noalias i32* @f.noalias() #12
+  ; CHECK: call fastcc noalias ptr @f.noalias() #12
   tail call ghccc nonnull i32* @f.nonnull() minsize
-  ; CHECK: tail call ghccc nonnull i32* @f.nonnull() #7
+  ; CHECK: tail call ghccc nonnull ptr @f.nonnull() #7
 
   ret void
 }
 
 define void @instructions.call_musttail(i8* inalloca %val) {
   musttail call void @f.param.inalloca(i8* inalloca %val)
-  ; CHECK: musttail call void @f.param.inalloca(i8* inalloca(i8) %val)
+  ; CHECK: musttail call void @f.param.inalloca(ptr inalloca(i8) %val)
 
   ret void
 }
@@ -1213,7 +1213,7 @@ catch2:
              cleanup
              ; CHECK: cleanup
              catch i32* null
-             ; CHECK: catch i32* null
+             ; CHECK: catch ptr null
   br label %proceed
 
 catch3:
@@ -1222,9 +1222,9 @@ catch3:
              cleanup
              ; CHECK: cleanup
              catch i32* null
-             ; CHECK: catch i32* null
+             ; CHECK: catch ptr null
              catch i32* null
-             ; CHECK: catch i32* null
+             ; CHECK: catch ptr null
   br label %proceed
 
 catch4:
@@ -1249,16 +1249,16 @@ define void @instructions.va_arg(i8* %v, ...) {
   %ap2 = bitcast i8** %ap to i8*
 
   call void @llvm.va_start(i8* %ap2)
-  ; CHECK: call void @llvm.va_start(i8* %ap2)
+  ; CHECK: call void @llvm.va_start(ptr %ap2)
 
   va_arg i8* %ap2, i32
-  ; CHECK: va_arg i8* %ap2, i32
+  ; CHECK: va_arg ptr %ap2, i32
 
   call void @llvm.va_copy(i8* %v, i8* %ap2)
-  ; CHECK: call void @llvm.va_copy(i8* %v, i8* %ap2)
+  ; CHECK: call void @llvm.va_copy(ptr %v, ptr %ap2)
 
   call void @llvm.va_end(i8* %ap2)
-  ; CHECK: call void @llvm.va_end(i8* %ap2)
+  ; CHECK: call void @llvm.va_end(ptr %ap2)
 
   ret void
 }
@@ -1270,14 +1270,14 @@ declare void @llvm.gcwrite(i8*, i8*, i8**)
 define void @intrinsics.gc() gc "shadow-stack" {
   %ptrloc = alloca i8*
   call void @llvm.gcroot(i8** %ptrloc, i8* null)
-  ; CHECK: call void @llvm.gcroot(i8** %ptrloc, i8* null)
+  ; CHECK: call void @llvm.gcroot(ptr %ptrloc, ptr null)
 
   call i8* @llvm.gcread(i8* null, i8** %ptrloc)
-  ; CHECK: call i8* @llvm.gcread(i8* null, i8** %ptrloc)
+  ; CHECK: call ptr @llvm.gcread(ptr null, ptr %ptrloc)
 
   %ref = alloca i8
   call void @llvm.gcwrite(i8* %ref, i8* null, i8** %ptrloc)
-  ; CHECK: call void @llvm.gcwrite(i8* %ref, i8* null, i8** %ptrloc)
+  ; CHECK: call void @llvm.gcwrite(ptr %ref, ptr null, ptr %ptrloc)
 
   ret void
 }
@@ -1300,9 +1300,9 @@ declare void @llvm.instrprof_increment(i8*, i64, i32, i32)
 !10 = !{!"rax"}
 define void @intrinsics.codegen() {
   call i8* @llvm.returnaddress(i32 1)
-  ; CHECK: call i8* @llvm.returnaddress(i32 1)
+  ; CHECK: call ptr @llvm.returnaddress(i32 1)
   call i8* @llvm.frameaddress(i32 1)
-  ; CHECK: call i8* @llvm.frameaddress.p0i8(i32 1)
+  ; CHECK: call ptr @llvm.frameaddress.p0(i32 1)
 
   call i32 @llvm.read_register.i32(metadata !10)
   ; CHECK: call i32 @llvm.read_register.i32(metadata !10)
@@ -1314,12 +1314,12 @@ define void @intrinsics.codegen() {
   ; CHECK: call void @llvm.write_register.i64(metadata !10, i64 0)
 
   %stack = call i8* @llvm.stacksave()
-  ; CHECK: %stack = call i8* @llvm.stacksave()
+  ; CHECK: %stack = call ptr @llvm.stacksave()
   call void @llvm.stackrestore(i8* %stack)
-  ; CHECK: call void @llvm.stackrestore(i8* %stack)
+  ; CHECK: call void @llvm.stackrestore(ptr %stack)
 
   call void @llvm.prefetch(i8* %stack, i32 0, i32 3, i32 0)
-  ; CHECK: call void @llvm.prefetch.p0i8(i8* %stack, i32 0, i32 3, i32 0)
+  ; CHECK: call void @llvm.prefetch.p0(ptr %stack, i32 0, i32 3, i32 0)
 
   call void @llvm.pcmarker(i32 1)
   ; CHECK: call void @llvm.pcmarker(i32 1)
@@ -1328,10 +1328,10 @@ define void @intrinsics.codegen() {
   ; CHECK: call i64 @llvm.readcyclecounter()
 
   call void @llvm.clear_cache(i8* null, i8* null)
-  ; CHECK: call void @llvm.clear_cache(i8* null, i8* null)
+  ; CHECK: call void @llvm.clear_cache(ptr null, ptr null)
 
   call void @llvm.instrprof_increment(i8* null, i64 0, i32 0, i32 0)
-  ; CHECK: call void @llvm.instrprof_increment(i8* null, i64 0, i32 0, i32 0)
+  ; CHECK: call void @llvm.instrprof_increment(ptr null, i64 0, i32 0, i32 0)
 
   ret void
 }
@@ -1341,7 +1341,7 @@ declare i8* @llvm.localrecover(i8* %func, i8* %fp, i32 %idx)
 define void @intrinsics.localescape() {
   %static.alloca = alloca i32
   call void (...) @llvm.localescape(i32* %static.alloca)
-  ; CHECK: call void (...) @llvm.localescape(i32* %static.alloca)
+  ; CHECK: call void (...) @llvm.localescape(ptr %static.alloca)
 
   call void @intrinsics.localrecover()
 
@@ -1351,7 +1351,7 @@ define void @intrinsics.localrecover() {
   %func = bitcast void ()* @intrinsics.localescape to i8*
   %fp = call i8* @llvm.frameaddress(i32 1)
   call i8* @llvm.localrecover(i8* %func, i8* %fp, i32 0)
-  ; CHECK: call i8* @llvm.localrecover(i8* %func, i8* %fp, i32 0)
+  ; CHECK: call ptr @llvm.localrecover(ptr %func, ptr %fp, i32 0)
 
   ret void
 }

diff  --git a/llvm/test/Bitcode/compatibility-3.9.ll b/llvm/test/Bitcode/compatibility-3.9.ll
index eefac4eaffc12..a43bd79e42788 100644
--- a/llvm/test/Bitcode/compatibility-3.9.ll
+++ b/llvm/test/Bitcode/compatibility-3.9.ll
@@ -37,7 +37,7 @@ $comdat.samesize = comdat samesize
 @const.float = constant double 0.0
 ; CHECK: @const.float = constant double 0.0
 @const.null = constant i8* null
-; CHECK: @const.null = constant i8* null
+; CHECK: @const.null = constant ptr null
 %const.struct.type = type { i32, i8 }
 %const.struct.type.packed = type <{ i32, i8 }>
 @const.struct = constant %const.struct.type { i32 -1, i8 undef }
@@ -188,13 +188,13 @@ $comdat2 = comdat any
 @g.used3 = global i8 0
 declare void @g.f1()
 @llvm.used = appending global [1 x i32*] [i32* @g.used1], section "llvm.metadata"
-; CHECK: @llvm.used = appending global [1 x i32*] [i32* @g.used1], section "llvm.metadata"
+; CHECK: @llvm.used = appending global [1 x ptr] [ptr @g.used1], section "llvm.metadata"
 @llvm.compiler.used = appending global [1 x i32*] [i32* @g.used2], section "llvm.metadata"
-; CHECK: @llvm.compiler.used = appending global [1 x i32*] [i32* @g.used2], section "llvm.metadata"
+; CHECK: @llvm.compiler.used = appending global [1 x ptr] [ptr @g.used2], section "llvm.metadata"
 @llvm.global_ctors = appending global [1 x %pri.func.data] [%pri.func.data { i32 0, void ()* @g.f1, i8* @g.used3 }], section "llvm.metadata"
-; CHECK: @llvm.global_ctors = appending global [1 x %pri.func.data] [%pri.func.data { i32 0, void ()* @g.f1, i8* @g.used3 }], section "llvm.metadata"
+; CHECK: @llvm.global_ctors = appending global [1 x %pri.func.data] [%pri.func.data { i32 0, ptr @g.f1, ptr @g.used3 }], section "llvm.metadata"
 @llvm.global_dtors = appending global [1 x %pri.func.data] [%pri.func.data { i32 0, void ()* @g.f1, i8* @g.used3 }], section "llvm.metadata"
-; CHECK: @llvm.global_dtors = appending global [1 x %pri.func.data] [%pri.func.data { i32 0, void ()* @g.f1, i8* @g.used3 }], section "llvm.metadata"
+; CHECK: @llvm.global_dtors = appending global [1 x %pri.func.data] [%pri.func.data { i32 0, ptr @g.f1, ptr @g.used3 }], section "llvm.metadata"
 
 ;; Aliases
 ; Format: @<Name> = [Linkage] [Visibility] [DLLStorageClass] [ThreadLocal]
@@ -202,73 +202,73 @@ declare void @g.f1()
 
 ; Aliases -- Linkage
 @a.private = private alias i32, i32* @g.private
-; CHECK: @a.private = private alias i32, i32* @g.private
+; CHECK: @a.private = private alias i32, ptr @g.private
 @a.internal = internal alias i32, i32* @g.internal
-; CHECK: @a.internal = internal alias i32, i32* @g.internal
+; CHECK: @a.internal = internal alias i32, ptr @g.internal
 @a.linkonce = linkonce alias i32, i32* @g.linkonce
-; CHECK: @a.linkonce = linkonce alias i32, i32* @g.linkonce
+; CHECK: @a.linkonce = linkonce alias i32, ptr @g.linkonce
 @a.weak = weak alias i32, i32* @g.weak
-; CHECK: @a.weak = weak alias i32, i32* @g.weak
+; CHECK: @a.weak = weak alias i32, ptr @g.weak
 @a.linkonce_odr = linkonce_odr alias i32, i32* @g.linkonce_odr
-; CHECK: @a.linkonce_odr = linkonce_odr alias i32, i32* @g.linkonce_odr
+; CHECK: @a.linkonce_odr = linkonce_odr alias i32, ptr @g.linkonce_odr
 @a.weak_odr = weak_odr alias i32, i32* @g.weak_odr
-; CHECK: @a.weak_odr = weak_odr alias i32, i32* @g.weak_odr
+; CHECK: @a.weak_odr = weak_odr alias i32, ptr @g.weak_odr
 @a.external = external alias i32, i32* @g1
-; CHECK: @a.external = alias i32, i32* @g1
+; CHECK: @a.external = alias i32, ptr @g1
 
 ; Aliases -- Visibility
 @a.default = default alias i32, i32* @g.default
-; CHECK: @a.default = alias i32, i32* @g.default
+; CHECK: @a.default = alias i32, ptr @g.default
 @a.hidden = hidden alias i32, i32* @g.hidden
-; CHECK: @a.hidden = hidden alias i32, i32* @g.hidden
+; CHECK: @a.hidden = hidden alias i32, ptr @g.hidden
 @a.protected = protected alias i32, i32* @g.protected
-; CHECK: @a.protected = protected alias i32, i32* @g.protected
+; CHECK: @a.protected = protected alias i32, ptr @g.protected
 
 ; Aliases -- DLLStorageClass
 @a.dlldefault = default alias i32, i32* @g.dlldefault
-; CHECK: @a.dlldefault = alias i32, i32* @g.dlldefault
+; CHECK: @a.dlldefault = alias i32, ptr @g.dlldefault
 @a.dllimport = dllimport alias i32, i32* @g1
-; CHECK: @a.dllimport = dllimport alias i32, i32* @g1
+; CHECK: @a.dllimport = dllimport alias i32, ptr @g1
 @a.dllexport = dllexport alias i32, i32* @g.dllexport
-; CHECK: @a.dllexport = dllexport alias i32, i32* @g.dllexport
+; CHECK: @a.dllexport = dllexport alias i32, ptr @g.dllexport
 
 ; Aliases -- ThreadLocal
 @a.notthreadlocal = alias i32, i32* @g.notthreadlocal
-; CHECK: @a.notthreadlocal = alias i32, i32* @g.notthreadlocal
+; CHECK: @a.notthreadlocal = alias i32, ptr @g.notthreadlocal
 @a.generaldynamic = thread_local alias i32, i32* @g.generaldynamic
-; CHECK: @a.generaldynamic = thread_local alias i32, i32* @g.generaldynamic
+; CHECK: @a.generaldynamic = thread_local alias i32, ptr @g.generaldynamic
 @a.localdynamic = thread_local(localdynamic) alias i32, i32* @g.localdynamic
-; CHECK: @a.localdynamic = thread_local(localdynamic) alias i32, i32* @g.localdynamic
+; CHECK: @a.localdynamic = thread_local(localdynamic) alias i32, ptr @g.localdynamic
 @a.initialexec = thread_local(initialexec) alias i32, i32* @g.initialexec
-; CHECK: @a.initialexec = thread_local(initialexec) alias i32, i32* @g.initialexec
+; CHECK: @a.initialexec = thread_local(initialexec) alias i32, ptr @g.initialexec
 @a.localexec = thread_local(localexec) alias i32, i32* @g.localexec
-; CHECK: @a.localexec = thread_local(localexec) alias i32, i32* @g.localexec
+; CHECK: @a.localexec = thread_local(localexec) alias i32, ptr @g.localexec
 
 ; Aliases -- unnamed_addr and local_unnamed_addr
 @a.unnamed_addr = unnamed_addr alias i32, i32* @g.unnamed_addr
-; CHECK: @a.unnamed_addr = unnamed_addr alias i32, i32* @g.unnamed_addr
+; CHECK: @a.unnamed_addr = unnamed_addr alias i32, ptr @g.unnamed_addr
 @a.local_unnamed_addr = local_unnamed_addr alias i32, i32* @g.local_unnamed_addr
-; CHECK: @a.local_unnamed_addr = local_unnamed_addr alias i32, i32* @g.local_unnamed_addr
+; CHECK: @a.local_unnamed_addr = local_unnamed_addr alias i32, ptr @g.local_unnamed_addr
 
 ;; IFunc
 ; Format @<Name> = [Linkage] [Visibility] ifunc <IFuncTy>,
-;                  <ResolverTy>* @<Resolver>
+;                  ptr @<Resolver>
 
 ; IFunc -- Linkage
 @ifunc.external = external ifunc void (), i8* ()* @ifunc_resolver
-; CHECK: @ifunc.external = ifunc void (), bitcast (i8* ()* @ifunc_resolver to void ()* ()*)
+; CHECK: @ifunc.external = ifunc void (), ptr @ifunc_resolver
 @ifunc.private = private ifunc void (), i8* ()* @ifunc_resolver
-; CHECK: @ifunc.private = private ifunc void (), bitcast (i8* ()* @ifunc_resolver to void ()* ()*)
+; CHECK: @ifunc.private = private ifunc void (), ptr @ifunc_resolver
 @ifunc.internal = internal ifunc void (), i8* ()* @ifunc_resolver
-; CHECK: @ifunc.internal = internal ifunc void (), bitcast (i8* ()* @ifunc_resolver to void ()* ()*)
+; CHECK: @ifunc.internal = internal ifunc void (), ptr @ifunc_resolver
 
 ; IFunc -- Visibility
 @ifunc.default = default ifunc void (), i8* ()* @ifunc_resolver
-; CHECK: @ifunc.default = ifunc void (), bitcast (i8* ()* @ifunc_resolver to void ()* ()*)
+; CHECK: @ifunc.default = ifunc void (), ptr @ifunc_resolver
 @ifunc.hidden = hidden ifunc void (), i8* ()* @ifunc_resolver
-; CHECK: @ifunc.hidden = hidden ifunc void (), bitcast (i8* ()* @ifunc_resolver to void ()* ()*)
+; CHECK: @ifunc.hidden = hidden ifunc void (), ptr @ifunc_resolver
 @ifunc.protected = protected ifunc void (), i8* ()* @ifunc_resolver
-; CHECK: @ifunc.protected = protected ifunc void (), bitcast (i8* ()* @ifunc_resolver to void ()* ()*)
+; CHECK: @ifunc.protected = protected ifunc void (), ptr @ifunc_resolver
 
 define i8* @ifunc_resolver() {
 entry:
@@ -478,23 +478,23 @@ declare zeroext i64 @f.zeroext()
 declare signext i64 @f.signext()
 ; CHECK: declare signext i64 @f.signext()
 declare inreg i32* @f.inreg()
-; CHECK: declare inreg i32* @f.inreg()
+; CHECK: declare inreg ptr @f.inreg()
 declare noalias i32* @f.noalias()
-; CHECK: declare noalias i32* @f.noalias()
+; CHECK: declare noalias ptr @f.noalias()
 declare nonnull i32* @f.nonnull()
-; CHECK: declare nonnull i32* @f.nonnull()
+; CHECK: declare nonnull ptr @f.nonnull()
 declare dereferenceable(4) i32* @f.dereferenceable4()
-; CHECK: declare dereferenceable(4) i32* @f.dereferenceable4()
+; CHECK: declare dereferenceable(4) ptr @f.dereferenceable4()
 declare dereferenceable(8) i32* @f.dereferenceable8()
-; CHECK: declare dereferenceable(8) i32* @f.dereferenceable8()
+; CHECK: declare dereferenceable(8) ptr @f.dereferenceable8()
 declare dereferenceable(16) i32* @f.dereferenceable16()
-; CHECK: declare dereferenceable(16) i32* @f.dereferenceable16()
+; CHECK: declare dereferenceable(16) ptr @f.dereferenceable16()
 declare dereferenceable_or_null(4) i32* @f.dereferenceable4_or_null()
-; CHECK: declare dereferenceable_or_null(4) i32* @f.dereferenceable4_or_null()
+; CHECK: declare dereferenceable_or_null(4) ptr @f.dereferenceable4_or_null()
 declare dereferenceable_or_null(8) i32* @f.dereferenceable8_or_null()
-; CHECK: declare dereferenceable_or_null(8) i32* @f.dereferenceable8_or_null()
+; CHECK: declare dereferenceable_or_null(8) ptr @f.dereferenceable8_or_null()
 declare dereferenceable_or_null(16) i32* @f.dereferenceable16_or_null()
-; CHECK: declare dereferenceable_or_null(16) i32* @f.dereferenceable16_or_null()
+; CHECK: declare dereferenceable_or_null(16) ptr @f.dereferenceable16_or_null()
 
 ; Functions -- Parameter attributes
 declare void @f.param.zeroext(i8 zeroext)
@@ -504,25 +504,25 @@ declare void @f.param.signext(i8 signext)
 declare void @f.param.inreg(i8 inreg)
 ; CHECK: declare void @f.param.inreg(i8 inreg)
 declare void @f.param.byval({ i8, i8 }* byval({ i8, i8 }))
-; CHECK: declare void @f.param.byval({ i8, i8 }* byval({ i8, i8 }))
+; CHECK: declare void @f.param.byval(ptr byval({ i8, i8 }))
 declare void @f.param.inalloca(i8* inalloca)
-; CHECK: declare void @f.param.inalloca(i8* inalloca(i8))
+; CHECK: declare void @f.param.inalloca(ptr inalloca(i8))
 declare void @f.param.sret(i8* sret(i8))
-; CHECK: declare void @f.param.sret(i8* sret(i8))
+; CHECK: declare void @f.param.sret(ptr sret(i8))
 declare void @f.param.noalias(i8* noalias)
-; CHECK: declare void @f.param.noalias(i8* noalias)
+; CHECK: declare void @f.param.noalias(ptr noalias)
 declare void @f.param.nocapture(i8* nocapture)
-; CHECK: declare void @f.param.nocapture(i8* nocapture)
+; CHECK: declare void @f.param.nocapture(ptr nocapture)
 declare void @f.param.nest(i8* nest)
-; CHECK: declare void @f.param.nest(i8* nest)
+; CHECK: declare void @f.param.nest(ptr nest)
 declare i8* @f.param.returned(i8* returned)
-; CHECK: declare i8* @f.param.returned(i8* returned)
+; CHECK: declare ptr @f.param.returned(ptr returned)
 declare void @f.param.nonnull(i8* nonnull)
-; CHECK: declare void @f.param.nonnull(i8* nonnull)
+; CHECK: declare void @f.param.nonnull(ptr nonnull)
 declare void @f.param.dereferenceable(i8* dereferenceable(4))
-; CHECK: declare void @f.param.dereferenceable(i8* dereferenceable(4))
+; CHECK: declare void @f.param.dereferenceable(ptr dereferenceable(4))
 declare void @f.param.dereferenceable_or_null(i8* dereferenceable_or_null(4))
-; CHECK: declare void @f.param.dereferenceable_or_null(i8* dereferenceable_or_null(4))
+; CHECK: declare void @f.param.dereferenceable_or_null(ptr dereferenceable_or_null(4))
 
 ; Functions -- unnamed_addr and local_unnamed_addr
 declare void @f.unnamed_addr() unnamed_addr
@@ -673,7 +673,7 @@ normal:
 declare i32 @f.personality_handler()
 ; CHECK: declare i32 @f.personality_handler()
 define void @f.personality() personality i32 ()* @f.personality_handler {
-; CHECK: define void @f.personality() personality i32 ()* @f.personality_handler
+; CHECK: define void @f.personality() personality ptr @f.personality_handler
   invoke void @llvm.donothing() to label %normal unwind label %exception
 exception:
   %cleanup = landingpad i32 cleanup
@@ -685,43 +685,43 @@ normal:
 ;; Atomic Memory Ordering Constraints
 define void @atomics(i32* %word) {
   %cmpxchg.0 = cmpxchg i32* %word, i32 0, i32 4 monotonic monotonic
-  ; CHECK: %cmpxchg.0 = cmpxchg i32* %word, i32 0, i32 4 monotonic monotonic
+  ; CHECK: %cmpxchg.0 = cmpxchg ptr %word, i32 0, i32 4 monotonic monotonic
   %cmpxchg.1 = cmpxchg i32* %word, i32 0, i32 5 acq_rel monotonic
-  ; CHECK: %cmpxchg.1 = cmpxchg i32* %word, i32 0, i32 5 acq_rel monotonic
+  ; CHECK: %cmpxchg.1 = cmpxchg ptr %word, i32 0, i32 5 acq_rel monotonic
   %cmpxchg.2 = cmpxchg i32* %word, i32 0, i32 6 acquire monotonic
-  ; CHECK: %cmpxchg.2 = cmpxchg i32* %word, i32 0, i32 6 acquire monotonic
+  ; CHECK: %cmpxchg.2 = cmpxchg ptr %word, i32 0, i32 6 acquire monotonic
   %cmpxchg.3 = cmpxchg i32* %word, i32 0, i32 7 release monotonic
-  ; CHECK: %cmpxchg.3 = cmpxchg i32* %word, i32 0, i32 7 release monotonic
+  ; CHECK: %cmpxchg.3 = cmpxchg ptr %word, i32 0, i32 7 release monotonic
   %cmpxchg.4 = cmpxchg i32* %word, i32 0, i32 8 seq_cst monotonic
-  ; CHECK: %cmpxchg.4 = cmpxchg i32* %word, i32 0, i32 8 seq_cst monotonic
+  ; CHECK: %cmpxchg.4 = cmpxchg ptr %word, i32 0, i32 8 seq_cst monotonic
   %cmpxchg.5 = cmpxchg weak i32* %word, i32 0, i32 9 seq_cst monotonic
-  ; CHECK: %cmpxchg.5 = cmpxchg weak i32* %word, i32 0, i32 9 seq_cst monotonic
+  ; CHECK: %cmpxchg.5 = cmpxchg weak ptr %word, i32 0, i32 9 seq_cst monotonic
   %cmpxchg.6 = cmpxchg volatile i32* %word, i32 0, i32 10 seq_cst monotonic
-  ; CHECK: %cmpxchg.6 = cmpxchg volatile i32* %word, i32 0, i32 10 seq_cst monotonic
+  ; CHECK: %cmpxchg.6 = cmpxchg volatile ptr %word, i32 0, i32 10 seq_cst monotonic
   %cmpxchg.7 = cmpxchg weak volatile i32* %word, i32 0, i32 11 syncscope("singlethread") seq_cst monotonic
-  ; CHECK: %cmpxchg.7 = cmpxchg weak volatile i32* %word, i32 0, i32 11 syncscope("singlethread") seq_cst monotonic
+  ; CHECK: %cmpxchg.7 = cmpxchg weak volatile ptr %word, i32 0, i32 11 syncscope("singlethread") seq_cst monotonic
   %atomicrmw.xchg = atomicrmw xchg i32* %word, i32 12 monotonic
-  ; CHECK: %atomicrmw.xchg = atomicrmw xchg i32* %word, i32 12 monotonic
+  ; CHECK: %atomicrmw.xchg = atomicrmw xchg ptr %word, i32 12 monotonic
   %atomicrmw.add = atomicrmw add i32* %word, i32 13 monotonic
-  ; CHECK: %atomicrmw.add = atomicrmw add i32* %word, i32 13 monotonic
+  ; CHECK: %atomicrmw.add = atomicrmw add ptr %word, i32 13 monotonic
   %atomicrmw.sub = atomicrmw sub i32* %word, i32 14 monotonic
-  ; CHECK: %atomicrmw.sub = atomicrmw sub i32* %word, i32 14 monotonic
+  ; CHECK: %atomicrmw.sub = atomicrmw sub ptr %word, i32 14 monotonic
   %atomicrmw.and = atomicrmw and i32* %word, i32 15 monotonic
-  ; CHECK: %atomicrmw.and = atomicrmw and i32* %word, i32 15 monotonic
+  ; CHECK: %atomicrmw.and = atomicrmw and ptr %word, i32 15 monotonic
   %atomicrmw.nand = atomicrmw nand i32* %word, i32 16 monotonic
-  ; CHECK: %atomicrmw.nand = atomicrmw nand i32* %word, i32 16 monotonic
+  ; CHECK: %atomicrmw.nand = atomicrmw nand ptr %word, i32 16 monotonic
   %atomicrmw.or = atomicrmw or i32* %word, i32 17 monotonic
-  ; CHECK: %atomicrmw.or = atomicrmw or i32* %word, i32 17 monotonic
+  ; CHECK: %atomicrmw.or = atomicrmw or ptr %word, i32 17 monotonic
   %atomicrmw.xor = atomicrmw xor i32* %word, i32 18 monotonic
-  ; CHECK: %atomicrmw.xor = atomicrmw xor i32* %word, i32 18 monotonic
+  ; CHECK: %atomicrmw.xor = atomicrmw xor ptr %word, i32 18 monotonic
   %atomicrmw.max = atomicrmw max i32* %word, i32 19 monotonic
-  ; CHECK: %atomicrmw.max = atomicrmw max i32* %word, i32 19 monotonic
+  ; CHECK: %atomicrmw.max = atomicrmw max ptr %word, i32 19 monotonic
   %atomicrmw.min = atomicrmw volatile min i32* %word, i32 20 monotonic
-  ; CHECK: %atomicrmw.min = atomicrmw volatile min i32* %word, i32 20 monotonic
+  ; CHECK: %atomicrmw.min = atomicrmw volatile min ptr %word, i32 20 monotonic
   %atomicrmw.umax = atomicrmw umax i32* %word, i32 21 syncscope("singlethread") monotonic
-  ; CHECK: %atomicrmw.umax = atomicrmw umax i32* %word, i32 21 syncscope("singlethread") monotonic
+  ; CHECK: %atomicrmw.umax = atomicrmw umax ptr %word, i32 21 syncscope("singlethread") monotonic
   %atomicrmw.umin = atomicrmw volatile umin i32* %word, i32 22 syncscope("singlethread") monotonic
-  ; CHECK: %atomicrmw.umin = atomicrmw volatile umin i32* %word, i32 22 syncscope("singlethread") monotonic
+  ; CHECK: %atomicrmw.umin = atomicrmw volatile umin ptr %word, i32 22 syncscope("singlethread") monotonic
   fence acquire
   ; CHECK: fence acquire
   fence release
@@ -732,18 +732,18 @@ define void @atomics(i32* %word) {
   ; CHECK: fence syncscope("singlethread") seq_cst
 
   %ld.1 = load atomic i32, i32* %word monotonic, align 4
-  ; CHECK: %ld.1 = load atomic i32, i32* %word monotonic, align 4
+  ; CHECK: %ld.1 = load atomic i32, ptr %word monotonic, align 4
   %ld.2 = load atomic volatile i32, i32* %word acquire, align 8
-  ; CHECK: %ld.2 = load atomic volatile i32, i32* %word acquire, align 8
+  ; CHECK: %ld.2 = load atomic volatile i32, ptr %word acquire, align 8
   %ld.3 = load atomic volatile i32, i32* %word syncscope("singlethread") seq_cst, align 16
-  ; CHECK: %ld.3 = load atomic volatile i32, i32* %word syncscope("singlethread") seq_cst, align 16
+  ; CHECK: %ld.3 = load atomic volatile i32, ptr %word syncscope("singlethread") seq_cst, align 16
 
   store atomic i32 23, i32* %word monotonic, align 4
-  ; CHECK: store atomic i32 23, i32* %word monotonic, align 4
+  ; CHECK: store atomic i32 23, ptr %word monotonic, align 4
   store atomic volatile i32 24, i32* %word monotonic, align 4
-  ; CHECK: store atomic volatile i32 24, i32* %word monotonic, align 4
+  ; CHECK: store atomic volatile i32 24, ptr %word monotonic, align 4
   store atomic volatile i32 25, i32* %word syncscope("singlethread") monotonic, align 4
-  ; CHECK: store atomic volatile i32 25, i32* %word syncscope("singlethread") monotonic, align 4
+  ; CHECK: store atomic volatile i32 25, ptr %word syncscope("singlethread") monotonic, align 4
   ret void
 }
 
@@ -788,17 +788,17 @@ define void @fastMathFlagsForCalls(float %f, double %d1, <4 x double> %d2) {
 %opaquety = type opaque
 define void @typesystem() {
   %p0 = bitcast i8* null to i32 (i32)*
-  ; CHECK: %p0 = bitcast i8* null to i32 (i32)*
+  ; CHECK: %p0 = bitcast ptr null to ptr
   %p1 = bitcast i8* null to void (i8*)*
-  ; CHECK: %p1 = bitcast i8* null to void (i8*)*
+  ; CHECK: %p1 = bitcast ptr null to ptr
   %p2 = bitcast i8* null to i32 (i8*, ...)*
-  ; CHECK: %p2 = bitcast i8* null to i32 (i8*, ...)*
+  ; CHECK: %p2 = bitcast ptr null to ptr
   %p3 = bitcast i8* null to { i32, i8 } (i8*, ...)*
-  ; CHECK: %p3 = bitcast i8* null to { i32, i8 } (i8*, ...)*
+  ; CHECK: %p3 = bitcast ptr null to ptr
   %p4 = bitcast i8* null to <{ i32, i8 }> (i8*, ...)*
-  ; CHECK: %p4 = bitcast i8* null to <{ i32, i8 }> (i8*, ...)*
+  ; CHECK: %p4 = bitcast ptr null to ptr
   %p5 = bitcast i8* null to <{ i32, i8 }> (<{ i8*, i64 }>*, ...)*
-  ; CHECK: %p5 = bitcast i8* null to <{ i32, i8 }> (<{ i8*, i64 }>*, ...)*
+  ; CHECK: %p5 = bitcast ptr null to ptr
 
   %t0 = alloca i1942652
   ; CHECK: %t0 = alloca i1942652
@@ -817,7 +817,7 @@ define void @typesystem() {
   %t7 = alloca x86_mmx
   ; CHECK: %t7 = alloca x86_mmx
   %t8 = alloca %opaquety*
-  ; CHECK: %t8 = alloca %opaquety*
+  ; CHECK: %t8 = alloca ptr
 
   ret void
 }
@@ -866,9 +866,9 @@ defaultdest.1:
 defaultdest.2:
 
   indirectbr i8* blockaddress(@instructions.terminators, %defaultdest.2), [label %defaultdest.2]
-  ; CHECK: indirectbr i8* blockaddress(@instructions.terminators, %defaultdest.2), [label %defaultdest.2]
+  ; CHECK: indirectbr ptr blockaddress(@instructions.terminators, %defaultdest.2), [label %defaultdest.2]
   indirectbr i8* blockaddress(@instructions.terminators, %defaultdest.2), [label %defaultdest.2, label %defaultdest.2]
-  ; CHECK: indirectbr i8* blockaddress(@instructions.terminators, %defaultdest.2), [label %defaultdest.2, label %defaultdest.2]
+  ; CHECK: indirectbr ptr blockaddress(@instructions.terminators, %defaultdest.2), [label %defaultdest.2, label %defaultdest.2]
 
   invoke fastcc void @f.fastcc()
   ; CHECK: invoke fastcc void @f.fastcc()
@@ -908,7 +908,7 @@ catchswitch2:
 catchpad2:
   catchpad within %cs2 [i32* %arg1]
   br label %normal
-  ; CHECK: catchpad within %cs2 [i32* %arg1]
+  ; CHECK: catchpad within %cs2 [ptr %arg1]
   ; CHECK-NEXT: br label %normal
 
 catchswitch3:
@@ -917,7 +917,7 @@ catchswitch3:
 catchpad3:
   catchpad within %cs3 [i32* %arg1, i32* %arg2]
   br label %normal
-  ; CHECK: catchpad within %cs3 [i32* %arg1, i32* %arg2]
+  ; CHECK: catchpad within %cs3 [ptr %arg1, ptr %arg2]
   ; CHECK-NEXT: br label %normal
 
 cleanuppad1:
@@ -1097,17 +1097,17 @@ define void @instructions.aggregateops({ i8, i32 } %up, <{ i8, i32 }> %p,
   %n.ptr = alloca { i8, { i32 } }
 
   getelementptr { i8, i32 }, { i8, i32 }* %up.ptr, i8 0
-  ; CHECK: getelementptr { i8, i32 }, { i8, i32 }* %up.ptr, i8 0
+  ; CHECK: getelementptr { i8, i32 }, ptr %up.ptr, i8 0
   getelementptr <{ i8, i32 }>, <{ i8, i32 }>* %p.ptr, i8 1
-  ; CHECK: getelementptr <{ i8, i32 }>, <{ i8, i32 }>* %p.ptr, i8 1
+  ; CHECK: getelementptr <{ i8, i32 }>, ptr %p.ptr, i8 1
   getelementptr [3 x i8], [3 x i8]* %arr.ptr, i8 2
-  ; CHECK: getelementptr [3 x i8], [3 x i8]* %arr.ptr, i8 2
+  ; CHECK: getelementptr [3 x i8], ptr %arr.ptr, i8 2
   getelementptr { i8, { i32 } }, { i8, { i32 } }* %n.ptr, i32 0, i32 1
-  ; CHECK: getelementptr { i8, { i32 } }, { i8, { i32 } }* %n.ptr, i32 0, i32 1
+  ; CHECK: getelementptr { i8, { i32 } }, ptr %n.ptr, i32 0, i32 1
   getelementptr inbounds { i8, { i32 } }, { i8, { i32 } }* %n.ptr, i32 1, i32 0
-  ; CHECK: getelementptr inbounds { i8, { i32 } }, { i8, { i32 } }* %n.ptr, i32 1, i32 0
+  ; CHECK: getelementptr inbounds { i8, { i32 } }, ptr %n.ptr, i32 1, i32 0
   getelementptr i8, <2 x i8*> %pvec, <2 x i64> %offsets
-  ; CHECK: getelementptr i8, <2 x i8*> %pvec, <2 x i64> %offsets
+  ; CHECK: getelementptr i8, <2 x ptr> %pvec, <2 x i64> %offsets
 
   ret void
 }
@@ -1123,14 +1123,14 @@ define void @instructions.memops(i32** %base) {
   ; CHECK: alloca inalloca i32, i8 4, align 4
 
   load i32*, i32** %base, align 8, !invariant.load !7, !nontemporal !8, !nonnull !7, !dereferenceable !9, !dereferenceable_or_null !9
-  ; CHECK: load i32*, i32** %base, align 8, !invariant.load !7, !nontemporal !8, !nonnull !7, !dereferenceable !9, !dereferenceable_or_null !9
+  ; CHECK: load ptr, ptr %base, align 8, !invariant.load !7, !nontemporal !8, !nonnull !7, !dereferenceable !9, !dereferenceable_or_null !9
   load volatile i32*, i32** %base, align 8, !invariant.load !7, !nontemporal !8, !nonnull !7, !dereferenceable !9, !dereferenceable_or_null !9
-  ; CHECK: load volatile i32*, i32** %base, align 8, !invariant.load !7, !nontemporal !8, !nonnull !7, !dereferenceable !9, !dereferenceable_or_null !9
+  ; CHECK: load volatile ptr, ptr %base, align 8, !invariant.load !7, !nontemporal !8, !nonnull !7, !dereferenceable !9, !dereferenceable_or_null !9
 
   store i32* null, i32** %base, align 4, !nontemporal !8
-  ; CHECK: store i32* null, i32** %base, align 4, !nontemporal !8
+  ; CHECK: store ptr null, ptr %base, align 4, !nontemporal !8
   store volatile i32* null, i32** %base, align 4, !nontemporal !8
-  ; CHECK: store volatile i32* null, i32** %base, align 4, !nontemporal !8
+  ; CHECK: store volatile ptr null, ptr %base, align 4, !nontemporal !8
 
   ret void
 }
@@ -1156,13 +1156,13 @@ define void @instructions.conversions() {
   sitofp i32 -1 to float
   ; CHECK: sitofp i32 -1 to float
   ptrtoint i8* null to i64
-  ; CHECK: ptrtoint i8* null to i64
+  ; CHECK: ptrtoint ptr null to i64
   inttoptr i64 0 to i8*
-  ; CHECK: inttoptr i64 0 to i8*
+  ; CHECK: inttoptr i64 0 to ptr
   bitcast i32 0 to i32
   ; CHECK: bitcast i32 0 to i32
   addrspacecast i32* null to i32 addrspace(1)*
-  ; CHECK: addrspacecast i32* null to i32 addrspace(1)*
+  ; CHECK: addrspacecast ptr null to ptr addrspace(1)
 
   ret void
 }
@@ -1244,16 +1244,16 @@ exit:
   ; CHECK: call void @f.nobuiltin() #43
 
   call fastcc noalias i32* @f.noalias() noinline
-  ; CHECK: call fastcc noalias i32* @f.noalias() #12
+  ; CHECK: call fastcc noalias ptr @f.noalias() #12
   tail call ghccc nonnull i32* @f.nonnull() minsize
-  ; CHECK: tail call ghccc nonnull i32* @f.nonnull() #7
+  ; CHECK: tail call ghccc nonnull ptr @f.nonnull() #7
 
   ret void
 }
 
 define void @instructions.call_musttail(i8* inalloca %val) {
   musttail call void @f.param.inalloca(i8* inalloca %val)
-  ; CHECK: musttail call void @f.param.inalloca(i8* inalloca(i8) %val)
+  ; CHECK: musttail call void @f.param.inalloca(ptr inalloca(i8) %val)
 
   ret void
 }
@@ -1284,7 +1284,7 @@ catch2:
              cleanup
              ; CHECK: cleanup
              catch i32* null
-             ; CHECK: catch i32* null
+             ; CHECK: catch ptr null
   br label %proceed
 
 catch3:
@@ -1293,9 +1293,9 @@ catch3:
              cleanup
              ; CHECK: cleanup
              catch i32* null
-             ; CHECK: catch i32* null
+             ; CHECK: catch ptr null
              catch i32* null
-             ; CHECK: catch i32* null
+             ; CHECK: catch ptr null
   br label %proceed
 
 catch4:
@@ -1320,16 +1320,16 @@ define void @instructions.va_arg(i8* %v, ...) {
   %ap2 = bitcast i8** %ap to i8*
 
   call void @llvm.va_start(i8* %ap2)
-  ; CHECK: call void @llvm.va_start(i8* %ap2)
+  ; CHECK: call void @llvm.va_start(ptr %ap2)
 
   va_arg i8* %ap2, i32
-  ; CHECK: va_arg i8* %ap2, i32
+  ; CHECK: va_arg ptr %ap2, i32
 
   call void @llvm.va_copy(i8* %v, i8* %ap2)
-  ; CHECK: call void @llvm.va_copy(i8* %v, i8* %ap2)
+  ; CHECK: call void @llvm.va_copy(ptr %v, ptr %ap2)
 
   call void @llvm.va_end(i8* %ap2)
-  ; CHECK: call void @llvm.va_end(i8* %ap2)
+  ; CHECK: call void @llvm.va_end(ptr %ap2)
 
   ret void
 }
@@ -1341,14 +1341,14 @@ declare void @llvm.gcwrite(i8*, i8*, i8**)
 define void @intrinsics.gc() gc "shadow-stack" {
   %ptrloc = alloca i8*
   call void @llvm.gcroot(i8** %ptrloc, i8* null)
-  ; CHECK: call void @llvm.gcroot(i8** %ptrloc, i8* null)
+  ; CHECK: call void @llvm.gcroot(ptr %ptrloc, ptr null)
 
   call i8* @llvm.gcread(i8* null, i8** %ptrloc)
-  ; CHECK: call i8* @llvm.gcread(i8* null, i8** %ptrloc)
+  ; CHECK: call ptr @llvm.gcread(ptr null, ptr %ptrloc)
 
   %ref = alloca i8
   call void @llvm.gcwrite(i8* %ref, i8* null, i8** %ptrloc)
-  ; CHECK: call void @llvm.gcwrite(i8* %ref, i8* null, i8** %ptrloc)
+  ; CHECK: call void @llvm.gcwrite(ptr %ref, ptr null, ptr %ptrloc)
 
   ret void
 }
@@ -1371,9 +1371,9 @@ declare void @llvm.instrprof_increment(i8*, i64, i32, i32)
 !10 = !{!"rax"}
 define void @intrinsics.codegen() {
   call i8* @llvm.returnaddress(i32 1)
-  ; CHECK: call i8* @llvm.returnaddress(i32 1)
+  ; CHECK: call ptr @llvm.returnaddress(i32 1)
   call i8* @llvm.frameaddress(i32 1)
-  ; CHECK: call i8* @llvm.frameaddress.p0i8(i32 1)
+  ; CHECK: call ptr @llvm.frameaddress.p0(i32 1)
 
   call i32 @llvm.read_register.i32(metadata !10)
   ; CHECK: call i32 @llvm.read_register.i32(metadata !10)
@@ -1385,12 +1385,12 @@ define void @intrinsics.codegen() {
   ; CHECK: call void @llvm.write_register.i64(metadata !10, i64 0)
 
   %stack = call i8* @llvm.stacksave()
-  ; CHECK: %stack = call i8* @llvm.stacksave()
+  ; CHECK: %stack = call ptr @llvm.stacksave()
   call void @llvm.stackrestore(i8* %stack)
-  ; CHECK: call void @llvm.stackrestore(i8* %stack)
+  ; CHECK: call void @llvm.stackrestore(ptr %stack)
 
   call void @llvm.prefetch(i8* %stack, i32 0, i32 3, i32 0)
-  ; CHECK: call void @llvm.prefetch.p0i8(i8* %stack, i32 0, i32 3, i32 0)
+  ; CHECK: call void @llvm.prefetch.p0(ptr %stack, i32 0, i32 3, i32 0)
 
   call void @llvm.pcmarker(i32 1)
   ; CHECK: call void @llvm.pcmarker(i32 1)
@@ -1399,10 +1399,10 @@ define void @intrinsics.codegen() {
   ; CHECK: call i64 @llvm.readcyclecounter()
 
   call void @llvm.clear_cache(i8* null, i8* null)
-  ; CHECK: call void @llvm.clear_cache(i8* null, i8* null)
+  ; CHECK: call void @llvm.clear_cache(ptr null, ptr null)
 
   call void @llvm.instrprof_increment(i8* null, i64 0, i32 0, i32 0)
-  ; CHECK: call void @llvm.instrprof_increment(i8* null, i64 0, i32 0, i32 0)
+  ; CHECK: call void @llvm.instrprof_increment(ptr null, i64 0, i32 0, i32 0)
 
   ret void
 }
@@ -1412,7 +1412,7 @@ declare i8* @llvm.localrecover(i8* %func, i8* %fp, i32 %idx)
 define void @intrinsics.localescape() {
   %static.alloca = alloca i32
   call void (...) @llvm.localescape(i32* %static.alloca)
-  ; CHECK: call void (...) @llvm.localescape(i32* %static.alloca)
+  ; CHECK: call void (...) @llvm.localescape(ptr %static.alloca)
 
   call void @intrinsics.localrecover()
 
@@ -1422,7 +1422,7 @@ define void @intrinsics.localrecover() {
   %func = bitcast void ()* @intrinsics.localescape to i8*
   %fp = call i8* @llvm.frameaddress(i32 1)
   call i8* @llvm.localrecover(i8* %func, i8* %fp, i32 0)
-  ; CHECK: call i8* @llvm.localrecover(i8* %func, i8* %fp, i32 0)
+  ; CHECK: call ptr @llvm.localrecover(ptr %func, ptr %fp, i32 0)
 
   ret void
 }

diff  --git a/llvm/test/Bitcode/compatibility-4.0.ll b/llvm/test/Bitcode/compatibility-4.0.ll
index e9731dcc2889c..a73628772bcee 100644
--- a/llvm/test/Bitcode/compatibility-4.0.ll
+++ b/llvm/test/Bitcode/compatibility-4.0.ll
@@ -37,7 +37,7 @@ $comdat.samesize = comdat samesize
 @const.float = constant double 0.0
 ; CHECK: @const.float = constant double 0.0
 @const.null = constant i8* null
-; CHECK: @const.null = constant i8* null
+; CHECK: @const.null = constant ptr null
 %const.struct.type = type { i32, i8 }
 %const.struct.type.packed = type <{ i32, i8 }>
 @const.struct = constant %const.struct.type { i32 -1, i8 undef }
@@ -188,13 +188,13 @@ $comdat2 = comdat any
 @g.used3 = global i8 0
 declare void @g.f1()
 @llvm.used = appending global [1 x i32*] [i32* @g.used1], section "llvm.metadata"
-; CHECK: @llvm.used = appending global [1 x i32*] [i32* @g.used1], section "llvm.metadata"
+; CHECK: @llvm.used = appending global [1 x ptr] [ptr @g.used1], section "llvm.metadata"
 @llvm.compiler.used = appending global [1 x i32*] [i32* @g.used2], section "llvm.metadata"
-; CHECK: @llvm.compiler.used = appending global [1 x i32*] [i32* @g.used2], section "llvm.metadata"
+; CHECK: @llvm.compiler.used = appending global [1 x ptr] [ptr @g.used2], section "llvm.metadata"
 @llvm.global_ctors = appending global [1 x %pri.func.data] [%pri.func.data { i32 0, void ()* @g.f1, i8* @g.used3 }], section "llvm.metadata"
-; CHECK: @llvm.global_ctors = appending global [1 x %pri.func.data] [%pri.func.data { i32 0, void ()* @g.f1, i8* @g.used3 }], section "llvm.metadata"
+; CHECK: @llvm.global_ctors = appending global [1 x %pri.func.data] [%pri.func.data { i32 0, ptr @g.f1, ptr @g.used3 }], section "llvm.metadata"
 @llvm.global_dtors = appending global [1 x %pri.func.data] [%pri.func.data { i32 0, void ()* @g.f1, i8* @g.used3 }], section "llvm.metadata"
-; CHECK: @llvm.global_dtors = appending global [1 x %pri.func.data] [%pri.func.data { i32 0, void ()* @g.f1, i8* @g.used3 }], section "llvm.metadata"
+; CHECK: @llvm.global_dtors = appending global [1 x %pri.func.data] [%pri.func.data { i32 0, ptr @g.f1, ptr @g.used3 }], section "llvm.metadata"
 
 ;; Aliases
 ; Format: @<Name> = [Linkage] [Visibility] [DLLStorageClass] [ThreadLocal]
@@ -202,73 +202,73 @@ declare void @g.f1()
 
 ; Aliases -- Linkage
 @a.private = private alias i32, i32* @g.private
-; CHECK: @a.private = private alias i32, i32* @g.private
+; CHECK: @a.private = private alias i32, ptr @g.private
 @a.internal = internal alias i32, i32* @g.internal
-; CHECK: @a.internal = internal alias i32, i32* @g.internal
+; CHECK: @a.internal = internal alias i32, ptr @g.internal
 @a.linkonce = linkonce alias i32, i32* @g.linkonce
-; CHECK: @a.linkonce = linkonce alias i32, i32* @g.linkonce
+; CHECK: @a.linkonce = linkonce alias i32, ptr @g.linkonce
 @a.weak = weak alias i32, i32* @g.weak
-; CHECK: @a.weak = weak alias i32, i32* @g.weak
+; CHECK: @a.weak = weak alias i32, ptr @g.weak
 @a.linkonce_odr = linkonce_odr alias i32, i32* @g.linkonce_odr
-; CHECK: @a.linkonce_odr = linkonce_odr alias i32, i32* @g.linkonce_odr
+; CHECK: @a.linkonce_odr = linkonce_odr alias i32, ptr @g.linkonce_odr
 @a.weak_odr = weak_odr alias i32, i32* @g.weak_odr
-; CHECK: @a.weak_odr = weak_odr alias i32, i32* @g.weak_odr
+; CHECK: @a.weak_odr = weak_odr alias i32, ptr @g.weak_odr
 @a.external = external alias i32, i32* @g1
-; CHECK: @a.external = alias i32, i32* @g1
+; CHECK: @a.external = alias i32, ptr @g1
 
 ; Aliases -- Visibility
 @a.default = default alias i32, i32* @g.default
-; CHECK: @a.default = alias i32, i32* @g.default
+; CHECK: @a.default = alias i32, ptr @g.default
 @a.hidden = hidden alias i32, i32* @g.hidden
-; CHECK: @a.hidden = hidden alias i32, i32* @g.hidden
+; CHECK: @a.hidden = hidden alias i32, ptr @g.hidden
 @a.protected = protected alias i32, i32* @g.protected
-; CHECK: @a.protected = protected alias i32, i32* @g.protected
+; CHECK: @a.protected = protected alias i32, ptr @g.protected
 
 ; Aliases -- DLLStorageClass
 @a.dlldefault = default alias i32, i32* @g.dlldefault
-; CHECK: @a.dlldefault = alias i32, i32* @g.dlldefault
+; CHECK: @a.dlldefault = alias i32, ptr @g.dlldefault
 @a.dllimport = dllimport alias i32, i32* @g1
-; CHECK: @a.dllimport = dllimport alias i32, i32* @g1
+; CHECK: @a.dllimport = dllimport alias i32, ptr @g1
 @a.dllexport = dllexport alias i32, i32* @g.dllexport
-; CHECK: @a.dllexport = dllexport alias i32, i32* @g.dllexport
+; CHECK: @a.dllexport = dllexport alias i32, ptr @g.dllexport
 
 ; Aliases -- ThreadLocal
 @a.notthreadlocal = alias i32, i32* @g.notthreadlocal
-; CHECK: @a.notthreadlocal = alias i32, i32* @g.notthreadlocal
+; CHECK: @a.notthreadlocal = alias i32, ptr @g.notthreadlocal
 @a.generaldynamic = thread_local alias i32, i32* @g.generaldynamic
-; CHECK: @a.generaldynamic = thread_local alias i32, i32* @g.generaldynamic
+; CHECK: @a.generaldynamic = thread_local alias i32, ptr @g.generaldynamic
 @a.localdynamic = thread_local(localdynamic) alias i32, i32* @g.localdynamic
-; CHECK: @a.localdynamic = thread_local(localdynamic) alias i32, i32* @g.localdynamic
+; CHECK: @a.localdynamic = thread_local(localdynamic) alias i32, ptr @g.localdynamic
 @a.initialexec = thread_local(initialexec) alias i32, i32* @g.initialexec
-; CHECK: @a.initialexec = thread_local(initialexec) alias i32, i32* @g.initialexec
+; CHECK: @a.initialexec = thread_local(initialexec) alias i32, ptr @g.initialexec
 @a.localexec = thread_local(localexec) alias i32, i32* @g.localexec
-; CHECK: @a.localexec = thread_local(localexec) alias i32, i32* @g.localexec
+; CHECK: @a.localexec = thread_local(localexec) alias i32, ptr @g.localexec
 
 ; Aliases -- unnamed_addr and local_unnamed_addr
 @a.unnamed_addr = unnamed_addr alias i32, i32* @g.unnamed_addr
-; CHECK: @a.unnamed_addr = unnamed_addr alias i32, i32* @g.unnamed_addr
+; CHECK: @a.unnamed_addr = unnamed_addr alias i32, ptr @g.unnamed_addr
 @a.local_unnamed_addr = local_unnamed_addr alias i32, i32* @g.local_unnamed_addr
-; CHECK: @a.local_unnamed_addr = local_unnamed_addr alias i32, i32* @g.local_unnamed_addr
+; CHECK: @a.local_unnamed_addr = local_unnamed_addr alias i32, ptr @g.local_unnamed_addr
 
 ;; IFunc
 ; Format @<Name> = [Linkage] [Visibility] ifunc <IFuncTy>,
-;                  <ResolverTy>* @<Resolver>
+;                  ptr @<Resolver>
 
 ; IFunc -- Linkage
 @ifunc.external = external ifunc void (), i8* ()* @ifunc_resolver
-; CHECK: @ifunc.external = ifunc void (), bitcast (i8* ()* @ifunc_resolver to void ()* ()*)
+; CHECK: @ifunc.external = ifunc void (), ptr @ifunc_resolver
 @ifunc.private = private ifunc void (), i8* ()* @ifunc_resolver
-; CHECK: @ifunc.private = private ifunc void (), bitcast (i8* ()* @ifunc_resolver to void ()* ()*)
+; CHECK: @ifunc.private = private ifunc void (), ptr @ifunc_resolver
 @ifunc.internal = internal ifunc void (), i8* ()* @ifunc_resolver
-; CHECK: @ifunc.internal = internal ifunc void (), bitcast (i8* ()* @ifunc_resolver to void ()* ()*)
+; CHECK: @ifunc.internal = internal ifunc void (), ptr @ifunc_resolver
 
 ; IFunc -- Visibility
 @ifunc.default = default ifunc void (), i8* ()* @ifunc_resolver
-; CHECK: @ifunc.default = ifunc void (), bitcast (i8* ()* @ifunc_resolver to void ()* ()*)
+; CHECK: @ifunc.default = ifunc void (), ptr @ifunc_resolver
 @ifunc.hidden = hidden ifunc void (), i8* ()* @ifunc_resolver
-; CHECK: @ifunc.hidden = hidden ifunc void (), bitcast (i8* ()* @ifunc_resolver to void ()* ()*)
+; CHECK: @ifunc.hidden = hidden ifunc void (), ptr @ifunc_resolver
 @ifunc.protected = protected ifunc void (), i8* ()* @ifunc_resolver
-; CHECK: @ifunc.protected = protected ifunc void (), bitcast (i8* ()* @ifunc_resolver to void ()* ()*)
+; CHECK: @ifunc.protected = protected ifunc void (), ptr @ifunc_resolver
 
 define i8* @ifunc_resolver() {
 entry:
@@ -478,23 +478,23 @@ declare zeroext i64 @f.zeroext()
 declare signext i64 @f.signext()
 ; CHECK: declare signext i64 @f.signext()
 declare inreg i32* @f.inreg()
-; CHECK: declare inreg i32* @f.inreg()
+; CHECK: declare inreg ptr @f.inreg()
 declare noalias i32* @f.noalias()
-; CHECK: declare noalias i32* @f.noalias()
+; CHECK: declare noalias ptr @f.noalias()
 declare nonnull i32* @f.nonnull()
-; CHECK: declare nonnull i32* @f.nonnull()
+; CHECK: declare nonnull ptr @f.nonnull()
 declare dereferenceable(4) i32* @f.dereferenceable4()
-; CHECK: declare dereferenceable(4) i32* @f.dereferenceable4()
+; CHECK: declare dereferenceable(4) ptr @f.dereferenceable4()
 declare dereferenceable(8) i32* @f.dereferenceable8()
-; CHECK: declare dereferenceable(8) i32* @f.dereferenceable8()
+; CHECK: declare dereferenceable(8) ptr @f.dereferenceable8()
 declare dereferenceable(16) i32* @f.dereferenceable16()
-; CHECK: declare dereferenceable(16) i32* @f.dereferenceable16()
+; CHECK: declare dereferenceable(16) ptr @f.dereferenceable16()
 declare dereferenceable_or_null(4) i32* @f.dereferenceable4_or_null()
-; CHECK: declare dereferenceable_or_null(4) i32* @f.dereferenceable4_or_null()
+; CHECK: declare dereferenceable_or_null(4) ptr @f.dereferenceable4_or_null()
 declare dereferenceable_or_null(8) i32* @f.dereferenceable8_or_null()
-; CHECK: declare dereferenceable_or_null(8) i32* @f.dereferenceable8_or_null()
+; CHECK: declare dereferenceable_or_null(8) ptr @f.dereferenceable8_or_null()
 declare dereferenceable_or_null(16) i32* @f.dereferenceable16_or_null()
-; CHECK: declare dereferenceable_or_null(16) i32* @f.dereferenceable16_or_null()
+; CHECK: declare dereferenceable_or_null(16) ptr @f.dereferenceable16_or_null()
 
 ; Functions -- Parameter attributes
 declare void @f.param.zeroext(i8 zeroext)
@@ -504,25 +504,25 @@ declare void @f.param.signext(i8 signext)
 declare void @f.param.inreg(i8 inreg)
 ; CHECK: declare void @f.param.inreg(i8 inreg)
 declare void @f.param.byval({ i8, i8 }* byval({ i8, i8 }))
-; CHECK: declare void @f.param.byval({ i8, i8 }* byval({ i8, i8 }))
+; CHECK: declare void @f.param.byval(ptr byval({ i8, i8 }))
 declare void @f.param.inalloca(i8* inalloca)
-; CHECK: declare void @f.param.inalloca(i8* inalloca(i8))
+; CHECK: declare void @f.param.inalloca(ptr inalloca(i8))
 declare void @f.param.sret(i8* sret(i8))
-; CHECK: declare void @f.param.sret(i8* sret(i8))
+; CHECK: declare void @f.param.sret(ptr sret(i8))
 declare void @f.param.noalias(i8* noalias)
-; CHECK: declare void @f.param.noalias(i8* noalias)
+; CHECK: declare void @f.param.noalias(ptr noalias)
 declare void @f.param.nocapture(i8* nocapture)
-; CHECK: declare void @f.param.nocapture(i8* nocapture)
+; CHECK: declare void @f.param.nocapture(ptr nocapture)
 declare void @f.param.nest(i8* nest)
-; CHECK: declare void @f.param.nest(i8* nest)
+; CHECK: declare void @f.param.nest(ptr nest)
 declare i8* @f.param.returned(i8* returned)
-; CHECK: declare i8* @f.param.returned(i8* returned)
+; CHECK: declare ptr @f.param.returned(ptr returned)
 declare void @f.param.nonnull(i8* nonnull)
-; CHECK: declare void @f.param.nonnull(i8* nonnull)
+; CHECK: declare void @f.param.nonnull(ptr nonnull)
 declare void @f.param.dereferenceable(i8* dereferenceable(4))
-; CHECK: declare void @f.param.dereferenceable(i8* dereferenceable(4))
+; CHECK: declare void @f.param.dereferenceable(ptr dereferenceable(4))
 declare void @f.param.dereferenceable_or_null(i8* dereferenceable_or_null(4))
-; CHECK: declare void @f.param.dereferenceable_or_null(i8* dereferenceable_or_null(4))
+; CHECK: declare void @f.param.dereferenceable_or_null(ptr dereferenceable_or_null(4))
 
 ; Functions -- unnamed_addr and local_unnamed_addr
 declare void @f.unnamed_addr() unnamed_addr
@@ -673,7 +673,7 @@ normal:
 declare i32 @f.personality_handler()
 ; CHECK: declare i32 @f.personality_handler()
 define void @f.personality() personality i32 ()* @f.personality_handler {
-; CHECK: define void @f.personality() personality i32 ()* @f.personality_handler
+; CHECK: define void @f.personality() personality ptr @f.personality_handler
   invoke void @llvm.donothing() to label %normal unwind label %exception
 exception:
   %cleanup = landingpad i32 cleanup
@@ -685,43 +685,43 @@ normal:
 ;; Atomic Memory Ordering Constraints
 define void @atomics(i32* %word) {
   %cmpxchg.0 = cmpxchg i32* %word, i32 0, i32 4 monotonic monotonic
-  ; CHECK: %cmpxchg.0 = cmpxchg i32* %word, i32 0, i32 4 monotonic monotonic
+  ; CHECK: %cmpxchg.0 = cmpxchg ptr %word, i32 0, i32 4 monotonic monotonic
   %cmpxchg.1 = cmpxchg i32* %word, i32 0, i32 5 acq_rel monotonic
-  ; CHECK: %cmpxchg.1 = cmpxchg i32* %word, i32 0, i32 5 acq_rel monotonic
+  ; CHECK: %cmpxchg.1 = cmpxchg ptr %word, i32 0, i32 5 acq_rel monotonic
   %cmpxchg.2 = cmpxchg i32* %word, i32 0, i32 6 acquire monotonic
-  ; CHECK: %cmpxchg.2 = cmpxchg i32* %word, i32 0, i32 6 acquire monotonic
+  ; CHECK: %cmpxchg.2 = cmpxchg ptr %word, i32 0, i32 6 acquire monotonic
   %cmpxchg.3 = cmpxchg i32* %word, i32 0, i32 7 release monotonic
-  ; CHECK: %cmpxchg.3 = cmpxchg i32* %word, i32 0, i32 7 release monotonic
+  ; CHECK: %cmpxchg.3 = cmpxchg ptr %word, i32 0, i32 7 release monotonic
   %cmpxchg.4 = cmpxchg i32* %word, i32 0, i32 8 seq_cst monotonic
-  ; CHECK: %cmpxchg.4 = cmpxchg i32* %word, i32 0, i32 8 seq_cst monotonic
+  ; CHECK: %cmpxchg.4 = cmpxchg ptr %word, i32 0, i32 8 seq_cst monotonic
   %cmpxchg.5 = cmpxchg weak i32* %word, i32 0, i32 9 seq_cst monotonic
-  ; CHECK: %cmpxchg.5 = cmpxchg weak i32* %word, i32 0, i32 9 seq_cst monotonic
+  ; CHECK: %cmpxchg.5 = cmpxchg weak ptr %word, i32 0, i32 9 seq_cst monotonic
   %cmpxchg.6 = cmpxchg volatile i32* %word, i32 0, i32 10 seq_cst monotonic
-  ; CHECK: %cmpxchg.6 = cmpxchg volatile i32* %word, i32 0, i32 10 seq_cst monotonic
+  ; CHECK: %cmpxchg.6 = cmpxchg volatile ptr %word, i32 0, i32 10 seq_cst monotonic
   %cmpxchg.7 = cmpxchg weak volatile i32* %word, i32 0, i32 11 syncscope("singlethread") seq_cst monotonic
-  ; CHECK: %cmpxchg.7 = cmpxchg weak volatile i32* %word, i32 0, i32 11 syncscope("singlethread") seq_cst monotonic
+  ; CHECK: %cmpxchg.7 = cmpxchg weak volatile ptr %word, i32 0, i32 11 syncscope("singlethread") seq_cst monotonic
   %atomicrmw.xchg = atomicrmw xchg i32* %word, i32 12 monotonic
-  ; CHECK: %atomicrmw.xchg = atomicrmw xchg i32* %word, i32 12 monotonic
+  ; CHECK: %atomicrmw.xchg = atomicrmw xchg ptr %word, i32 12 monotonic
   %atomicrmw.add = atomicrmw add i32* %word, i32 13 monotonic
-  ; CHECK: %atomicrmw.add = atomicrmw add i32* %word, i32 13 monotonic
+  ; CHECK: %atomicrmw.add = atomicrmw add ptr %word, i32 13 monotonic
   %atomicrmw.sub = atomicrmw sub i32* %word, i32 14 monotonic
-  ; CHECK: %atomicrmw.sub = atomicrmw sub i32* %word, i32 14 monotonic
+  ; CHECK: %atomicrmw.sub = atomicrmw sub ptr %word, i32 14 monotonic
   %atomicrmw.and = atomicrmw and i32* %word, i32 15 monotonic
-  ; CHECK: %atomicrmw.and = atomicrmw and i32* %word, i32 15 monotonic
+  ; CHECK: %atomicrmw.and = atomicrmw and ptr %word, i32 15 monotonic
   %atomicrmw.nand = atomicrmw nand i32* %word, i32 16 monotonic
-  ; CHECK: %atomicrmw.nand = atomicrmw nand i32* %word, i32 16 monotonic
+  ; CHECK: %atomicrmw.nand = atomicrmw nand ptr %word, i32 16 monotonic
   %atomicrmw.or = atomicrmw or i32* %word, i32 17 monotonic
-  ; CHECK: %atomicrmw.or = atomicrmw or i32* %word, i32 17 monotonic
+  ; CHECK: %atomicrmw.or = atomicrmw or ptr %word, i32 17 monotonic
   %atomicrmw.xor = atomicrmw xor i32* %word, i32 18 monotonic
-  ; CHECK: %atomicrmw.xor = atomicrmw xor i32* %word, i32 18 monotonic
+  ; CHECK: %atomicrmw.xor = atomicrmw xor ptr %word, i32 18 monotonic
   %atomicrmw.max = atomicrmw max i32* %word, i32 19 monotonic
-  ; CHECK: %atomicrmw.max = atomicrmw max i32* %word, i32 19 monotonic
+  ; CHECK: %atomicrmw.max = atomicrmw max ptr %word, i32 19 monotonic
   %atomicrmw.min = atomicrmw volatile min i32* %word, i32 20 monotonic
-  ; CHECK: %atomicrmw.min = atomicrmw volatile min i32* %word, i32 20 monotonic
+  ; CHECK: %atomicrmw.min = atomicrmw volatile min ptr %word, i32 20 monotonic
   %atomicrmw.umax = atomicrmw umax i32* %word, i32 21 syncscope("singlethread") monotonic
-  ; CHECK: %atomicrmw.umax = atomicrmw umax i32* %word, i32 21 syncscope("singlethread") monotonic
+  ; CHECK: %atomicrmw.umax = atomicrmw umax ptr %word, i32 21 syncscope("singlethread") monotonic
   %atomicrmw.umin = atomicrmw volatile umin i32* %word, i32 22 syncscope("singlethread") monotonic
-  ; CHECK: %atomicrmw.umin = atomicrmw volatile umin i32* %word, i32 22 syncscope("singlethread") monotonic
+  ; CHECK: %atomicrmw.umin = atomicrmw volatile umin ptr %word, i32 22 syncscope("singlethread") monotonic
   fence acquire
   ; CHECK: fence acquire
   fence release
@@ -732,18 +732,18 @@ define void @atomics(i32* %word) {
   ; CHECK: fence syncscope("singlethread") seq_cst
 
   %ld.1 = load atomic i32, i32* %word monotonic, align 4
-  ; CHECK: %ld.1 = load atomic i32, i32* %word monotonic, align 4
+  ; CHECK: %ld.1 = load atomic i32, ptr %word monotonic, align 4
   %ld.2 = load atomic volatile i32, i32* %word acquire, align 8
-  ; CHECK: %ld.2 = load atomic volatile i32, i32* %word acquire, align 8
+  ; CHECK: %ld.2 = load atomic volatile i32, ptr %word acquire, align 8
   %ld.3 = load atomic volatile i32, i32* %word syncscope("singlethread") seq_cst, align 16
-  ; CHECK: %ld.3 = load atomic volatile i32, i32* %word syncscope("singlethread") seq_cst, align 16
+  ; CHECK: %ld.3 = load atomic volatile i32, ptr %word syncscope("singlethread") seq_cst, align 16
 
   store atomic i32 23, i32* %word monotonic, align 4
-  ; CHECK: store atomic i32 23, i32* %word monotonic, align 4
+  ; CHECK: store atomic i32 23, ptr %word monotonic, align 4
   store atomic volatile i32 24, i32* %word monotonic, align 4
-  ; CHECK: store atomic volatile i32 24, i32* %word monotonic, align 4
+  ; CHECK: store atomic volatile i32 24, ptr %word monotonic, align 4
   store atomic volatile i32 25, i32* %word syncscope("singlethread") monotonic, align 4
-  ; CHECK: store atomic volatile i32 25, i32* %word syncscope("singlethread") monotonic, align 4
+  ; CHECK: store atomic volatile i32 25, ptr %word syncscope("singlethread") monotonic, align 4
   ret void
 }
 
@@ -788,17 +788,17 @@ define void @fastMathFlagsForCalls(float %f, double %d1, <4 x double> %d2) {
 %opaquety = type opaque
 define void @typesystem() {
   %p0 = bitcast i8* null to i32 (i32)*
-  ; CHECK: %p0 = bitcast i8* null to i32 (i32)*
+  ; CHECK: %p0 = bitcast ptr null to ptr
   %p1 = bitcast i8* null to void (i8*)*
-  ; CHECK: %p1 = bitcast i8* null to void (i8*)*
+  ; CHECK: %p1 = bitcast ptr null to ptr
   %p2 = bitcast i8* null to i32 (i8*, ...)*
-  ; CHECK: %p2 = bitcast i8* null to i32 (i8*, ...)*
+  ; CHECK: %p2 = bitcast ptr null to ptr
   %p3 = bitcast i8* null to { i32, i8 } (i8*, ...)*
-  ; CHECK: %p3 = bitcast i8* null to { i32, i8 } (i8*, ...)*
+  ; CHECK: %p3 = bitcast ptr null to ptr
   %p4 = bitcast i8* null to <{ i32, i8 }> (i8*, ...)*
-  ; CHECK: %p4 = bitcast i8* null to <{ i32, i8 }> (i8*, ...)*
+  ; CHECK: %p4 = bitcast ptr null to ptr
   %p5 = bitcast i8* null to <{ i32, i8 }> (<{ i8*, i64 }>*, ...)*
-  ; CHECK: %p5 = bitcast i8* null to <{ i32, i8 }> (<{ i8*, i64 }>*, ...)*
+  ; CHECK: %p5 = bitcast ptr null to ptr
 
   %t0 = alloca i1942652
   ; CHECK: %t0 = alloca i1942652
@@ -817,7 +817,7 @@ define void @typesystem() {
   %t7 = alloca x86_mmx
   ; CHECK: %t7 = alloca x86_mmx
   %t8 = alloca %opaquety*
-  ; CHECK: %t8 = alloca %opaquety*
+  ; CHECK: %t8 = alloca ptr
 
   ret void
 }
@@ -866,9 +866,9 @@ defaultdest.1:
 defaultdest.2:
 
   indirectbr i8* blockaddress(@instructions.terminators, %defaultdest.2), [label %defaultdest.2]
-  ; CHECK: indirectbr i8* blockaddress(@instructions.terminators, %defaultdest.2), [label %defaultdest.2]
+  ; CHECK: indirectbr ptr blockaddress(@instructions.terminators, %defaultdest.2), [label %defaultdest.2]
   indirectbr i8* blockaddress(@instructions.terminators, %defaultdest.2), [label %defaultdest.2, label %defaultdest.2]
-  ; CHECK: indirectbr i8* blockaddress(@instructions.terminators, %defaultdest.2), [label %defaultdest.2, label %defaultdest.2]
+  ; CHECK: indirectbr ptr blockaddress(@instructions.terminators, %defaultdest.2), [label %defaultdest.2, label %defaultdest.2]
 
   invoke fastcc void @f.fastcc()
   ; CHECK: invoke fastcc void @f.fastcc()
@@ -908,7 +908,7 @@ catchswitch2:
 catchpad2:
   catchpad within %cs2 [i32* %arg1]
   br label %normal
-  ; CHECK: catchpad within %cs2 [i32* %arg1]
+  ; CHECK: catchpad within %cs2 [ptr %arg1]
   ; CHECK-NEXT: br label %normal
 
 catchswitch3:
@@ -917,7 +917,7 @@ catchswitch3:
 catchpad3:
   catchpad within %cs3 [i32* %arg1, i32* %arg2]
   br label %normal
-  ; CHECK: catchpad within %cs3 [i32* %arg1, i32* %arg2]
+  ; CHECK: catchpad within %cs3 [ptr %arg1, ptr %arg2]
   ; CHECK-NEXT: br label %normal
 
 cleanuppad1:
@@ -1097,17 +1097,17 @@ define void @instructions.aggregateops({ i8, i32 } %up, <{ i8, i32 }> %p,
   %n.ptr = alloca { i8, { i32 } }
 
   getelementptr { i8, i32 }, { i8, i32 }* %up.ptr, i8 0
-  ; CHECK: getelementptr { i8, i32 }, { i8, i32 }* %up.ptr, i8 0
+  ; CHECK: getelementptr { i8, i32 }, ptr %up.ptr, i8 0
   getelementptr <{ i8, i32 }>, <{ i8, i32 }>* %p.ptr, i8 1
-  ; CHECK: getelementptr <{ i8, i32 }>, <{ i8, i32 }>* %p.ptr, i8 1
+  ; CHECK: getelementptr <{ i8, i32 }>, ptr %p.ptr, i8 1
   getelementptr [3 x i8], [3 x i8]* %arr.ptr, i8 2
-  ; CHECK: getelementptr [3 x i8], [3 x i8]* %arr.ptr, i8 2
+  ; CHECK: getelementptr [3 x i8], ptr %arr.ptr, i8 2
   getelementptr { i8, { i32 } }, { i8, { i32 } }* %n.ptr, i32 0, i32 1
-  ; CHECK: getelementptr { i8, { i32 } }, { i8, { i32 } }* %n.ptr, i32 0, i32 1
+  ; CHECK: getelementptr { i8, { i32 } }, ptr %n.ptr, i32 0, i32 1
   getelementptr inbounds { i8, { i32 } }, { i8, { i32 } }* %n.ptr, i32 1, i32 0
-  ; CHECK: getelementptr inbounds { i8, { i32 } }, { i8, { i32 } }* %n.ptr, i32 1, i32 0
+  ; CHECK: getelementptr inbounds { i8, { i32 } }, ptr %n.ptr, i32 1, i32 0
   getelementptr i8, <2 x i8*> %pvec, <2 x i64> %offsets
-  ; CHECK: getelementptr i8, <2 x i8*> %pvec, <2 x i64> %offsets
+  ; CHECK: getelementptr i8, <2 x ptr> %pvec, <2 x i64> %offsets
 
   ret void
 }
@@ -1123,14 +1123,14 @@ define void @instructions.memops(i32** %base) {
   ; CHECK: alloca inalloca i32, i8 4, align 4
 
   load i32*, i32** %base, align 8, !invariant.load !7, !nontemporal !8, !nonnull !7, !dereferenceable !9, !dereferenceable_or_null !9
-  ; CHECK: load i32*, i32** %base, align 8, !invariant.load !7, !nontemporal !8, !nonnull !7, !dereferenceable !9, !dereferenceable_or_null !9
+  ; CHECK: load ptr, ptr %base, align 8, !invariant.load !7, !nontemporal !8, !nonnull !7, !dereferenceable !9, !dereferenceable_or_null !9
   load volatile i32*, i32** %base, align 8, !invariant.load !7, !nontemporal !8, !nonnull !7, !dereferenceable !9, !dereferenceable_or_null !9
-  ; CHECK: load volatile i32*, i32** %base, align 8, !invariant.load !7, !nontemporal !8, !nonnull !7, !dereferenceable !9, !dereferenceable_or_null !9
+  ; CHECK: load volatile ptr, ptr %base, align 8, !invariant.load !7, !nontemporal !8, !nonnull !7, !dereferenceable !9, !dereferenceable_or_null !9
 
   store i32* null, i32** %base, align 4, !nontemporal !8
-  ; CHECK: store i32* null, i32** %base, align 4, !nontemporal !8
+  ; CHECK: store ptr null, ptr %base, align 4, !nontemporal !8
   store volatile i32* null, i32** %base, align 4, !nontemporal !8
-  ; CHECK: store volatile i32* null, i32** %base, align 4, !nontemporal !8
+  ; CHECK: store volatile ptr null, ptr %base, align 4, !nontemporal !8
 
   ret void
 }
@@ -1156,13 +1156,13 @@ define void @instructions.conversions() {
   sitofp i32 -1 to float
   ; CHECK: sitofp i32 -1 to float
   ptrtoint i8* null to i64
-  ; CHECK: ptrtoint i8* null to i64
+  ; CHECK: ptrtoint ptr null to i64
   inttoptr i64 0 to i8*
-  ; CHECK: inttoptr i64 0 to i8*
+  ; CHECK: inttoptr i64 0 to ptr
   bitcast i32 0 to i32
   ; CHECK: bitcast i32 0 to i32
   addrspacecast i32* null to i32 addrspace(1)*
-  ; CHECK: addrspacecast i32* null to i32 addrspace(1)*
+  ; CHECK: addrspacecast ptr null to ptr addrspace(1)
 
   ret void
 }
@@ -1244,16 +1244,16 @@ exit:
   ; CHECK: call void @f.nobuiltin() #43
 
   call fastcc noalias i32* @f.noalias() noinline
-  ; CHECK: call fastcc noalias i32* @f.noalias() #12
+  ; CHECK: call fastcc noalias ptr @f.noalias() #12
   tail call ghccc nonnull i32* @f.nonnull() minsize
-  ; CHECK: tail call ghccc nonnull i32* @f.nonnull() #7
+  ; CHECK: tail call ghccc nonnull ptr @f.nonnull() #7
 
   ret void
 }
 
 define void @instructions.call_musttail(i8* inalloca %val) {
   musttail call void @f.param.inalloca(i8* inalloca %val)
-  ; CHECK: musttail call void @f.param.inalloca(i8* inalloca(i8) %val)
+  ; CHECK: musttail call void @f.param.inalloca(ptr inalloca(i8) %val)
 
   ret void
 }
@@ -1284,7 +1284,7 @@ catch2:
              cleanup
              ; CHECK: cleanup
              catch i32* null
-             ; CHECK: catch i32* null
+             ; CHECK: catch ptr null
   br label %proceed
 
 catch3:
@@ -1293,9 +1293,9 @@ catch3:
              cleanup
              ; CHECK: cleanup
              catch i32* null
-             ; CHECK: catch i32* null
+             ; CHECK: catch ptr null
              catch i32* null
-             ; CHECK: catch i32* null
+             ; CHECK: catch ptr null
   br label %proceed
 
 catch4:
@@ -1320,16 +1320,16 @@ define void @instructions.va_arg(i8* %v, ...) {
   %ap2 = bitcast i8** %ap to i8*
 
   call void @llvm.va_start(i8* %ap2)
-  ; CHECK: call void @llvm.va_start(i8* %ap2)
+  ; CHECK: call void @llvm.va_start(ptr %ap2)
 
   va_arg i8* %ap2, i32
-  ; CHECK: va_arg i8* %ap2, i32
+  ; CHECK: va_arg ptr %ap2, i32
 
   call void @llvm.va_copy(i8* %v, i8* %ap2)
-  ; CHECK: call void @llvm.va_copy(i8* %v, i8* %ap2)
+  ; CHECK: call void @llvm.va_copy(ptr %v, ptr %ap2)
 
   call void @llvm.va_end(i8* %ap2)
-  ; CHECK: call void @llvm.va_end(i8* %ap2)
+  ; CHECK: call void @llvm.va_end(ptr %ap2)
 
   ret void
 }
@@ -1341,14 +1341,14 @@ declare void @llvm.gcwrite(i8*, i8*, i8**)
 define void @intrinsics.gc() gc "shadow-stack" {
   %ptrloc = alloca i8*
   call void @llvm.gcroot(i8** %ptrloc, i8* null)
-  ; CHECK: call void @llvm.gcroot(i8** %ptrloc, i8* null)
+  ; CHECK: call void @llvm.gcroot(ptr %ptrloc, ptr null)
 
   call i8* @llvm.gcread(i8* null, i8** %ptrloc)
-  ; CHECK: call i8* @llvm.gcread(i8* null, i8** %ptrloc)
+  ; CHECK: call ptr @llvm.gcread(ptr null, ptr %ptrloc)
 
   %ref = alloca i8
   call void @llvm.gcwrite(i8* %ref, i8* null, i8** %ptrloc)
-  ; CHECK: call void @llvm.gcwrite(i8* %ref, i8* null, i8** %ptrloc)
+  ; CHECK: call void @llvm.gcwrite(ptr %ref, ptr null, ptr %ptrloc)
 
   ret void
 }
@@ -1371,9 +1371,9 @@ declare void @llvm.instrprof_increment(i8*, i64, i32, i32)
 !10 = !{!"rax"}
 define void @intrinsics.codegen() {
   call i8* @llvm.returnaddress(i32 1)
-  ; CHECK: call i8* @llvm.returnaddress(i32 1)
+  ; CHECK: call ptr @llvm.returnaddress(i32 1)
   call i8* @llvm.frameaddress(i32 1)
-  ; CHECK: call i8* @llvm.frameaddress.p0i8(i32 1)
+  ; CHECK: call ptr @llvm.frameaddress.p0(i32 1)
 
   call i32 @llvm.read_register.i32(metadata !10)
   ; CHECK: call i32 @llvm.read_register.i32(metadata !10)
@@ -1385,12 +1385,12 @@ define void @intrinsics.codegen() {
   ; CHECK: call void @llvm.write_register.i64(metadata !10, i64 0)
 
   %stack = call i8* @llvm.stacksave()
-  ; CHECK: %stack = call i8* @llvm.stacksave()
+  ; CHECK: %stack = call ptr @llvm.stacksave()
   call void @llvm.stackrestore(i8* %stack)
-  ; CHECK: call void @llvm.stackrestore(i8* %stack)
+  ; CHECK: call void @llvm.stackrestore(ptr %stack)
 
   call void @llvm.prefetch(i8* %stack, i32 0, i32 3, i32 0)
-  ; CHECK: call void @llvm.prefetch.p0i8(i8* %stack, i32 0, i32 3, i32 0)
+  ; CHECK: call void @llvm.prefetch.p0(ptr %stack, i32 0, i32 3, i32 0)
 
   call void @llvm.pcmarker(i32 1)
   ; CHECK: call void @llvm.pcmarker(i32 1)
@@ -1399,10 +1399,10 @@ define void @intrinsics.codegen() {
   ; CHECK: call i64 @llvm.readcyclecounter()
 
   call void @llvm.clear_cache(i8* null, i8* null)
-  ; CHECK: call void @llvm.clear_cache(i8* null, i8* null)
+  ; CHECK: call void @llvm.clear_cache(ptr null, ptr null)
 
   call void @llvm.instrprof_increment(i8* null, i64 0, i32 0, i32 0)
-  ; CHECK: call void @llvm.instrprof_increment(i8* null, i64 0, i32 0, i32 0)
+  ; CHECK: call void @llvm.instrprof_increment(ptr null, i64 0, i32 0, i32 0)
 
   ret void
 }
@@ -1412,7 +1412,7 @@ declare i8* @llvm.localrecover(i8* %func, i8* %fp, i32 %idx)
 define void @intrinsics.localescape() {
   %static.alloca = alloca i32
   call void (...) @llvm.localescape(i32* %static.alloca)
-  ; CHECK: call void (...) @llvm.localescape(i32* %static.alloca)
+  ; CHECK: call void (...) @llvm.localescape(ptr %static.alloca)
 
   call void @intrinsics.localrecover()
 
@@ -1422,7 +1422,7 @@ define void @intrinsics.localrecover() {
   %func = bitcast void ()* @intrinsics.localescape to i8*
   %fp = call i8* @llvm.frameaddress(i32 1)
   call i8* @llvm.localrecover(i8* %func, i8* %fp, i32 0)
-  ; CHECK: call i8* @llvm.localrecover(i8* %func, i8* %fp, i32 0)
+  ; CHECK: call ptr @llvm.localrecover(ptr %func, ptr %fp, i32 0)
 
   ret void
 }
@@ -1611,7 +1611,7 @@ declare void @f.writeonly() writeonly
 ;; Constant Expressions
 
 define i8** @constexpr() {
-  ; CHECK: ret i8** getelementptr inbounds ({ [4 x i8*], [4 x i8*] }, { [4 x i8*], [4 x i8*] }* null, i32 0, inrange i32 1, i32 2)
+  ; CHECK: ret ptr getelementptr inbounds ({ [4 x ptr], [4 x ptr] }, ptr null, i32 0, inrange i32 1, i32 2)
   ret i8** getelementptr inbounds ({ [4 x i8*], [4 x i8*] }, { [4 x i8*], [4 x i8*] }* null, i32 0, inrange i32 1, i32 2)
 }
 

diff  --git a/llvm/test/Bitcode/compatibility-5.0.ll b/llvm/test/Bitcode/compatibility-5.0.ll
index 2ef54fab549b0..e440ed0d49086 100644
--- a/llvm/test/Bitcode/compatibility-5.0.ll
+++ b/llvm/test/Bitcode/compatibility-5.0.ll
@@ -37,7 +37,7 @@ $comdat.samesize = comdat samesize
 @const.float = constant double 0.0
 ; CHECK: @const.float = constant double 0.0
 @const.null = constant i8* null
-; CHECK: @const.null = constant i8* null
+; CHECK: @const.null = constant ptr null
 %const.struct.type = type { i32, i8 }
 %const.struct.type.packed = type <{ i32, i8 }>
 @const.struct = constant %const.struct.type { i32 -1, i8 undef }
@@ -188,13 +188,13 @@ $comdat2 = comdat any
 @g.used3 = global i8 0
 declare void @g.f1()
 @llvm.used = appending global [1 x i32*] [i32* @g.used1], section "llvm.metadata"
-; CHECK: @llvm.used = appending global [1 x i32*] [i32* @g.used1], section "llvm.metadata"
+; CHECK: @llvm.used = appending global [1 x ptr] [ptr @g.used1], section "llvm.metadata"
 @llvm.compiler.used = appending global [1 x i32*] [i32* @g.used2], section "llvm.metadata"
-; CHECK: @llvm.compiler.used = appending global [1 x i32*] [i32* @g.used2], section "llvm.metadata"
+; CHECK: @llvm.compiler.used = appending global [1 x ptr] [ptr @g.used2], section "llvm.metadata"
 @llvm.global_ctors = appending global [1 x %pri.func.data] [%pri.func.data { i32 0, void ()* @g.f1, i8* @g.used3 }], section "llvm.metadata"
-; CHECK: @llvm.global_ctors = appending global [1 x %pri.func.data] [%pri.func.data { i32 0, void ()* @g.f1, i8* @g.used3 }], section "llvm.metadata"
+; CHECK: @llvm.global_ctors = appending global [1 x %pri.func.data] [%pri.func.data { i32 0, ptr @g.f1, ptr @g.used3 }], section "llvm.metadata"
 @llvm.global_dtors = appending global [1 x %pri.func.data] [%pri.func.data { i32 0, void ()* @g.f1, i8* @g.used3 }], section "llvm.metadata"
-; CHECK: @llvm.global_dtors = appending global [1 x %pri.func.data] [%pri.func.data { i32 0, void ()* @g.f1, i8* @g.used3 }], section "llvm.metadata"
+; CHECK: @llvm.global_dtors = appending global [1 x %pri.func.data] [%pri.func.data { i32 0, ptr @g.f1, ptr @g.used3 }], section "llvm.metadata"
 
 ;; Aliases
 ; Format: @<Name> = [Linkage] [Visibility] [DLLStorageClass] [ThreadLocal]
@@ -202,73 +202,73 @@ declare void @g.f1()
 
 ; Aliases -- Linkage
 @a.private = private alias i32, i32* @g.private
-; CHECK: @a.private = private alias i32, i32* @g.private
+; CHECK: @a.private = private alias i32, ptr @g.private
 @a.internal = internal alias i32, i32* @g.internal
-; CHECK: @a.internal = internal alias i32, i32* @g.internal
+; CHECK: @a.internal = internal alias i32, ptr @g.internal
 @a.linkonce = linkonce alias i32, i32* @g.linkonce
-; CHECK: @a.linkonce = linkonce alias i32, i32* @g.linkonce
+; CHECK: @a.linkonce = linkonce alias i32, ptr @g.linkonce
 @a.weak = weak alias i32, i32* @g.weak
-; CHECK: @a.weak = weak alias i32, i32* @g.weak
+; CHECK: @a.weak = weak alias i32, ptr @g.weak
 @a.linkonce_odr = linkonce_odr alias i32, i32* @g.linkonce_odr
-; CHECK: @a.linkonce_odr = linkonce_odr alias i32, i32* @g.linkonce_odr
+; CHECK: @a.linkonce_odr = linkonce_odr alias i32, ptr @g.linkonce_odr
 @a.weak_odr = weak_odr alias i32, i32* @g.weak_odr
-; CHECK: @a.weak_odr = weak_odr alias i32, i32* @g.weak_odr
+; CHECK: @a.weak_odr = weak_odr alias i32, ptr @g.weak_odr
 @a.external = external alias i32, i32* @g1
-; CHECK: @a.external = alias i32, i32* @g1
+; CHECK: @a.external = alias i32, ptr @g1
 
 ; Aliases -- Visibility
 @a.default = default alias i32, i32* @g.default
-; CHECK: @a.default = alias i32, i32* @g.default
+; CHECK: @a.default = alias i32, ptr @g.default
 @a.hidden = hidden alias i32, i32* @g.hidden
-; CHECK: @a.hidden = hidden alias i32, i32* @g.hidden
+; CHECK: @a.hidden = hidden alias i32, ptr @g.hidden
 @a.protected = protected alias i32, i32* @g.protected
-; CHECK: @a.protected = protected alias i32, i32* @g.protected
+; CHECK: @a.protected = protected alias i32, ptr @g.protected
 
 ; Aliases -- DLLStorageClass
 @a.dlldefault = default alias i32, i32* @g.dlldefault
-; CHECK: @a.dlldefault = alias i32, i32* @g.dlldefault
+; CHECK: @a.dlldefault = alias i32, ptr @g.dlldefault
 @a.dllimport = dllimport alias i32, i32* @g1
-; CHECK: @a.dllimport = dllimport alias i32, i32* @g1
+; CHECK: @a.dllimport = dllimport alias i32, ptr @g1
 @a.dllexport = dllexport alias i32, i32* @g.dllexport
-; CHECK: @a.dllexport = dllexport alias i32, i32* @g.dllexport
+; CHECK: @a.dllexport = dllexport alias i32, ptr @g.dllexport
 
 ; Aliases -- ThreadLocal
 @a.notthreadlocal = alias i32, i32* @g.notthreadlocal
-; CHECK: @a.notthreadlocal = alias i32, i32* @g.notthreadlocal
+; CHECK: @a.notthreadlocal = alias i32, ptr @g.notthreadlocal
 @a.generaldynamic = thread_local alias i32, i32* @g.generaldynamic
-; CHECK: @a.generaldynamic = thread_local alias i32, i32* @g.generaldynamic
+; CHECK: @a.generaldynamic = thread_local alias i32, ptr @g.generaldynamic
 @a.localdynamic = thread_local(localdynamic) alias i32, i32* @g.localdynamic
-; CHECK: @a.localdynamic = thread_local(localdynamic) alias i32, i32* @g.localdynamic
+; CHECK: @a.localdynamic = thread_local(localdynamic) alias i32, ptr @g.localdynamic
 @a.initialexec = thread_local(initialexec) alias i32, i32* @g.initialexec
-; CHECK: @a.initialexec = thread_local(initialexec) alias i32, i32* @g.initialexec
+; CHECK: @a.initialexec = thread_local(initialexec) alias i32, ptr @g.initialexec
 @a.localexec = thread_local(localexec) alias i32, i32* @g.localexec
-; CHECK: @a.localexec = thread_local(localexec) alias i32, i32* @g.localexec
+; CHECK: @a.localexec = thread_local(localexec) alias i32, ptr @g.localexec
 
 ; Aliases -- unnamed_addr and local_unnamed_addr
 @a.unnamed_addr = unnamed_addr alias i32, i32* @g.unnamed_addr
-; CHECK: @a.unnamed_addr = unnamed_addr alias i32, i32* @g.unnamed_addr
+; CHECK: @a.unnamed_addr = unnamed_addr alias i32, ptr @g.unnamed_addr
 @a.local_unnamed_addr = local_unnamed_addr alias i32, i32* @g.local_unnamed_addr
-; CHECK: @a.local_unnamed_addr = local_unnamed_addr alias i32, i32* @g.local_unnamed_addr
+; CHECK: @a.local_unnamed_addr = local_unnamed_addr alias i32, ptr @g.local_unnamed_addr
 
 ;; IFunc
 ; Format @<Name> = [Linkage] [Visibility] ifunc <IFuncTy>,
-;                  <ResolverTy>* @<Resolver>
+;                  ptr @<Resolver>
 
 ; IFunc -- Linkage
 @ifunc.external = external ifunc void (), i8* ()* @ifunc_resolver
-; CHECK: @ifunc.external = ifunc void (), bitcast (i8* ()* @ifunc_resolver to void ()* ()*)
+; CHECK: @ifunc.external = ifunc void (), ptr @ifunc_resolver
 @ifunc.private = private ifunc void (), i8* ()* @ifunc_resolver
-; CHECK: @ifunc.private = private ifunc void (), bitcast (i8* ()* @ifunc_resolver to void ()* ()*)
+; CHECK: @ifunc.private = private ifunc void (), ptr @ifunc_resolver
 @ifunc.internal = internal ifunc void (), i8* ()* @ifunc_resolver
-; CHECK: @ifunc.internal = internal ifunc void (), bitcast (i8* ()* @ifunc_resolver to void ()* ()*)
+; CHECK: @ifunc.internal = internal ifunc void (), ptr @ifunc_resolver
 
 ; IFunc -- Visibility
 @ifunc.default = default ifunc void (), i8* ()* @ifunc_resolver
-; CHECK: @ifunc.default = ifunc void (), bitcast (i8* ()* @ifunc_resolver to void ()* ()*)
+; CHECK: @ifunc.default = ifunc void (), ptr @ifunc_resolver
 @ifunc.hidden = hidden ifunc void (), i8* ()* @ifunc_resolver
-; CHECK: @ifunc.hidden = hidden ifunc void (), bitcast (i8* ()* @ifunc_resolver to void ()* ()*)
+; CHECK: @ifunc.hidden = hidden ifunc void (), ptr @ifunc_resolver
 @ifunc.protected = protected ifunc void (), i8* ()* @ifunc_resolver
-; CHECK: @ifunc.protected = protected ifunc void (), bitcast (i8* ()* @ifunc_resolver to void ()* ()*)
+; CHECK: @ifunc.protected = protected ifunc void (), ptr @ifunc_resolver
 
 define i8* @ifunc_resolver() {
 entry:
@@ -482,23 +482,23 @@ declare zeroext i64 @f.zeroext()
 declare signext i64 @f.signext()
 ; CHECK: declare signext i64 @f.signext()
 declare inreg i32* @f.inreg()
-; CHECK: declare inreg i32* @f.inreg()
+; CHECK: declare inreg ptr @f.inreg()
 declare noalias i32* @f.noalias()
-; CHECK: declare noalias i32* @f.noalias()
+; CHECK: declare noalias ptr @f.noalias()
 declare nonnull i32* @f.nonnull()
-; CHECK: declare nonnull i32* @f.nonnull()
+; CHECK: declare nonnull ptr @f.nonnull()
 declare dereferenceable(4) i32* @f.dereferenceable4()
-; CHECK: declare dereferenceable(4) i32* @f.dereferenceable4()
+; CHECK: declare dereferenceable(4) ptr @f.dereferenceable4()
 declare dereferenceable(8) i32* @f.dereferenceable8()
-; CHECK: declare dereferenceable(8) i32* @f.dereferenceable8()
+; CHECK: declare dereferenceable(8) ptr @f.dereferenceable8()
 declare dereferenceable(16) i32* @f.dereferenceable16()
-; CHECK: declare dereferenceable(16) i32* @f.dereferenceable16()
+; CHECK: declare dereferenceable(16) ptr @f.dereferenceable16()
 declare dereferenceable_or_null(4) i32* @f.dereferenceable4_or_null()
-; CHECK: declare dereferenceable_or_null(4) i32* @f.dereferenceable4_or_null()
+; CHECK: declare dereferenceable_or_null(4) ptr @f.dereferenceable4_or_null()
 declare dereferenceable_or_null(8) i32* @f.dereferenceable8_or_null()
-; CHECK: declare dereferenceable_or_null(8) i32* @f.dereferenceable8_or_null()
+; CHECK: declare dereferenceable_or_null(8) ptr @f.dereferenceable8_or_null()
 declare dereferenceable_or_null(16) i32* @f.dereferenceable16_or_null()
-; CHECK: declare dereferenceable_or_null(16) i32* @f.dereferenceable16_or_null()
+; CHECK: declare dereferenceable_or_null(16) ptr @f.dereferenceable16_or_null()
 
 ; Functions -- Parameter attributes
 declare void @f.param.zeroext(i8 zeroext)
@@ -508,25 +508,25 @@ declare void @f.param.signext(i8 signext)
 declare void @f.param.inreg(i8 inreg)
 ; CHECK: declare void @f.param.inreg(i8 inreg)
 declare void @f.param.byval({ i8, i8 }* byval({ i8, i8 }))
-; CHECK: declare void @f.param.byval({ i8, i8 }* byval({ i8, i8 }))
+; CHECK: declare void @f.param.byval(ptr byval({ i8, i8 }))
 declare void @f.param.inalloca(i8* inalloca)
-; CHECK: declare void @f.param.inalloca(i8* inalloca(i8))
+; CHECK: declare void @f.param.inalloca(ptr inalloca(i8))
 declare void @f.param.sret(i8* sret(i8))
-; CHECK: declare void @f.param.sret(i8* sret(i8))
+; CHECK: declare void @f.param.sret(ptr sret(i8))
 declare void @f.param.noalias(i8* noalias)
-; CHECK: declare void @f.param.noalias(i8* noalias)
+; CHECK: declare void @f.param.noalias(ptr noalias)
 declare void @f.param.nocapture(i8* nocapture)
-; CHECK: declare void @f.param.nocapture(i8* nocapture)
+; CHECK: declare void @f.param.nocapture(ptr nocapture)
 declare void @f.param.nest(i8* nest)
-; CHECK: declare void @f.param.nest(i8* nest)
+; CHECK: declare void @f.param.nest(ptr nest)
 declare i8* @f.param.returned(i8* returned)
-; CHECK: declare i8* @f.param.returned(i8* returned)
+; CHECK: declare ptr @f.param.returned(ptr returned)
 declare void @f.param.nonnull(i8* nonnull)
-; CHECK: declare void @f.param.nonnull(i8* nonnull)
+; CHECK: declare void @f.param.nonnull(ptr nonnull)
 declare void @f.param.dereferenceable(i8* dereferenceable(4))
-; CHECK: declare void @f.param.dereferenceable(i8* dereferenceable(4))
+; CHECK: declare void @f.param.dereferenceable(ptr dereferenceable(4))
 declare void @f.param.dereferenceable_or_null(i8* dereferenceable_or_null(4))
-; CHECK: declare void @f.param.dereferenceable_or_null(i8* dereferenceable_or_null(4))
+; CHECK: declare void @f.param.dereferenceable_or_null(ptr dereferenceable_or_null(4))
 
 ; Functions -- unnamed_addr and local_unnamed_addr
 declare void @f.unnamed_addr() unnamed_addr
@@ -678,7 +678,7 @@ normal:
 declare i32 @f.personality_handler()
 ; CHECK: declare i32 @f.personality_handler()
 define void @f.personality() personality i32 ()* @f.personality_handler {
-; CHECK: define void @f.personality() personality i32 ()* @f.personality_handler
+; CHECK: define void @f.personality() personality ptr @f.personality_handler
   invoke void @llvm.donothing() to label %normal unwind label %exception
 exception:
   %cleanup = landingpad i32 cleanup
@@ -690,43 +690,43 @@ normal:
 ;; Atomic Memory Ordering Constraints
 define void @atomics(i32* %word) {
   %cmpxchg.0 = cmpxchg i32* %word, i32 0, i32 4 monotonic monotonic
-  ; CHECK: %cmpxchg.0 = cmpxchg i32* %word, i32 0, i32 4 monotonic monotonic
+  ; CHECK: %cmpxchg.0 = cmpxchg ptr %word, i32 0, i32 4 monotonic monotonic
   %cmpxchg.1 = cmpxchg i32* %word, i32 0, i32 5 acq_rel monotonic
-  ; CHECK: %cmpxchg.1 = cmpxchg i32* %word, i32 0, i32 5 acq_rel monotonic
+  ; CHECK: %cmpxchg.1 = cmpxchg ptr %word, i32 0, i32 5 acq_rel monotonic
   %cmpxchg.2 = cmpxchg i32* %word, i32 0, i32 6 acquire monotonic
-  ; CHECK: %cmpxchg.2 = cmpxchg i32* %word, i32 0, i32 6 acquire monotonic
+  ; CHECK: %cmpxchg.2 = cmpxchg ptr %word, i32 0, i32 6 acquire monotonic
   %cmpxchg.3 = cmpxchg i32* %word, i32 0, i32 7 release monotonic
-  ; CHECK: %cmpxchg.3 = cmpxchg i32* %word, i32 0, i32 7 release monotonic
+  ; CHECK: %cmpxchg.3 = cmpxchg ptr %word, i32 0, i32 7 release monotonic
   %cmpxchg.4 = cmpxchg i32* %word, i32 0, i32 8 seq_cst monotonic
-  ; CHECK: %cmpxchg.4 = cmpxchg i32* %word, i32 0, i32 8 seq_cst monotonic
+  ; CHECK: %cmpxchg.4 = cmpxchg ptr %word, i32 0, i32 8 seq_cst monotonic
   %cmpxchg.5 = cmpxchg weak i32* %word, i32 0, i32 9 seq_cst monotonic
-  ; CHECK: %cmpxchg.5 = cmpxchg weak i32* %word, i32 0, i32 9 seq_cst monotonic
+  ; CHECK: %cmpxchg.5 = cmpxchg weak ptr %word, i32 0, i32 9 seq_cst monotonic
   %cmpxchg.6 = cmpxchg volatile i32* %word, i32 0, i32 10 seq_cst monotonic
-  ; CHECK: %cmpxchg.6 = cmpxchg volatile i32* %word, i32 0, i32 10 seq_cst monotonic
+  ; CHECK: %cmpxchg.6 = cmpxchg volatile ptr %word, i32 0, i32 10 seq_cst monotonic
   %cmpxchg.7 = cmpxchg weak volatile i32* %word, i32 0, i32 11 syncscope("singlethread") seq_cst monotonic
-  ; CHECK: %cmpxchg.7 = cmpxchg weak volatile i32* %word, i32 0, i32 11 syncscope("singlethread") seq_cst monotonic
+  ; CHECK: %cmpxchg.7 = cmpxchg weak volatile ptr %word, i32 0, i32 11 syncscope("singlethread") seq_cst monotonic
   %atomicrmw.xchg = atomicrmw xchg i32* %word, i32 12 monotonic
-  ; CHECK: %atomicrmw.xchg = atomicrmw xchg i32* %word, i32 12 monotonic
+  ; CHECK: %atomicrmw.xchg = atomicrmw xchg ptr %word, i32 12 monotonic
   %atomicrmw.add = atomicrmw add i32* %word, i32 13 monotonic
-  ; CHECK: %atomicrmw.add = atomicrmw add i32* %word, i32 13 monotonic
+  ; CHECK: %atomicrmw.add = atomicrmw add ptr %word, i32 13 monotonic
   %atomicrmw.sub = atomicrmw sub i32* %word, i32 14 monotonic
-  ; CHECK: %atomicrmw.sub = atomicrmw sub i32* %word, i32 14 monotonic
+  ; CHECK: %atomicrmw.sub = atomicrmw sub ptr %word, i32 14 monotonic
   %atomicrmw.and = atomicrmw and i32* %word, i32 15 monotonic
-  ; CHECK: %atomicrmw.and = atomicrmw and i32* %word, i32 15 monotonic
+  ; CHECK: %atomicrmw.and = atomicrmw and ptr %word, i32 15 monotonic
   %atomicrmw.nand = atomicrmw nand i32* %word, i32 16 monotonic
-  ; CHECK: %atomicrmw.nand = atomicrmw nand i32* %word, i32 16 monotonic
+  ; CHECK: %atomicrmw.nand = atomicrmw nand ptr %word, i32 16 monotonic
   %atomicrmw.or = atomicrmw or i32* %word, i32 17 monotonic
-  ; CHECK: %atomicrmw.or = atomicrmw or i32* %word, i32 17 monotonic
+  ; CHECK: %atomicrmw.or = atomicrmw or ptr %word, i32 17 monotonic
   %atomicrmw.xor = atomicrmw xor i32* %word, i32 18 monotonic
-  ; CHECK: %atomicrmw.xor = atomicrmw xor i32* %word, i32 18 monotonic
+  ; CHECK: %atomicrmw.xor = atomicrmw xor ptr %word, i32 18 monotonic
   %atomicrmw.max = atomicrmw max i32* %word, i32 19 monotonic
-  ; CHECK: %atomicrmw.max = atomicrmw max i32* %word, i32 19 monotonic
+  ; CHECK: %atomicrmw.max = atomicrmw max ptr %word, i32 19 monotonic
   %atomicrmw.min = atomicrmw volatile min i32* %word, i32 20 monotonic
-  ; CHECK: %atomicrmw.min = atomicrmw volatile min i32* %word, i32 20 monotonic
+  ; CHECK: %atomicrmw.min = atomicrmw volatile min ptr %word, i32 20 monotonic
   %atomicrmw.umax = atomicrmw umax i32* %word, i32 21 syncscope("singlethread") monotonic
-  ; CHECK: %atomicrmw.umax = atomicrmw umax i32* %word, i32 21 syncscope("singlethread") monotonic
+  ; CHECK: %atomicrmw.umax = atomicrmw umax ptr %word, i32 21 syncscope("singlethread") monotonic
   %atomicrmw.umin = atomicrmw volatile umin i32* %word, i32 22 syncscope("singlethread") monotonic
-  ; CHECK: %atomicrmw.umin = atomicrmw volatile umin i32* %word, i32 22 syncscope("singlethread") monotonic
+  ; CHECK: %atomicrmw.umin = atomicrmw volatile umin ptr %word, i32 22 syncscope("singlethread") monotonic
   fence acquire
   ; CHECK: fence acquire
   fence release
@@ -737,18 +737,18 @@ define void @atomics(i32* %word) {
   ; CHECK: fence syncscope("singlethread") seq_cst
 
   %ld.1 = load atomic i32, i32* %word monotonic, align 4
-  ; CHECK: %ld.1 = load atomic i32, i32* %word monotonic, align 4
+  ; CHECK: %ld.1 = load atomic i32, ptr %word monotonic, align 4
   %ld.2 = load atomic volatile i32, i32* %word acquire, align 8
-  ; CHECK: %ld.2 = load atomic volatile i32, i32* %word acquire, align 8
+  ; CHECK: %ld.2 = load atomic volatile i32, ptr %word acquire, align 8
   %ld.3 = load atomic volatile i32, i32* %word syncscope("singlethread") seq_cst, align 16
-  ; CHECK: %ld.3 = load atomic volatile i32, i32* %word syncscope("singlethread") seq_cst, align 16
+  ; CHECK: %ld.3 = load atomic volatile i32, ptr %word syncscope("singlethread") seq_cst, align 16
 
   store atomic i32 23, i32* %word monotonic, align 4
-  ; CHECK: store atomic i32 23, i32* %word monotonic, align 4
+  ; CHECK: store atomic i32 23, ptr %word monotonic, align 4
   store atomic volatile i32 24, i32* %word monotonic, align 4
-  ; CHECK: store atomic volatile i32 24, i32* %word monotonic, align 4
+  ; CHECK: store atomic volatile i32 24, ptr %word monotonic, align 4
   store atomic volatile i32 25, i32* %word syncscope("singlethread") monotonic, align 4
-  ; CHECK: store atomic volatile i32 25, i32* %word syncscope("singlethread") monotonic, align 4
+  ; CHECK: store atomic volatile i32 25, ptr %word syncscope("singlethread") monotonic, align 4
   ret void
 }
 
@@ -795,17 +795,17 @@ define void @fastMathFlagsForCalls(float %f, double %d1, <4 x double> %d2) {
 %opaquety = type opaque
 define void @typesystem() {
   %p0 = bitcast i8* null to i32 (i32)*
-  ; CHECK: %p0 = bitcast i8* null to i32 (i32)*
+  ; CHECK: %p0 = bitcast ptr null to ptr
   %p1 = bitcast i8* null to void (i8*)*
-  ; CHECK: %p1 = bitcast i8* null to void (i8*)*
+  ; CHECK: %p1 = bitcast ptr null to ptr
   %p2 = bitcast i8* null to i32 (i8*, ...)*
-  ; CHECK: %p2 = bitcast i8* null to i32 (i8*, ...)*
+  ; CHECK: %p2 = bitcast ptr null to ptr
   %p3 = bitcast i8* null to { i32, i8 } (i8*, ...)*
-  ; CHECK: %p3 = bitcast i8* null to { i32, i8 } (i8*, ...)*
+  ; CHECK: %p3 = bitcast ptr null to ptr
   %p4 = bitcast i8* null to <{ i32, i8 }> (i8*, ...)*
-  ; CHECK: %p4 = bitcast i8* null to <{ i32, i8 }> (i8*, ...)*
+  ; CHECK: %p4 = bitcast ptr null to ptr
   %p5 = bitcast i8* null to <{ i32, i8 }> (<{ i8*, i64 }>*, ...)*
-  ; CHECK: %p5 = bitcast i8* null to <{ i32, i8 }> (<{ i8*, i64 }>*, ...)*
+  ; CHECK: %p5 = bitcast ptr null to ptr
 
   %t0 = alloca i1942652
   ; CHECK: %t0 = alloca i1942652
@@ -824,7 +824,7 @@ define void @typesystem() {
   %t7 = alloca x86_mmx
   ; CHECK: %t7 = alloca x86_mmx
   %t8 = alloca %opaquety*
-  ; CHECK: %t8 = alloca %opaquety*
+  ; CHECK: %t8 = alloca ptr
 
   ret void
 }
@@ -873,9 +873,9 @@ defaultdest.1:
 defaultdest.2:
 
   indirectbr i8* blockaddress(@instructions.terminators, %defaultdest.2), [label %defaultdest.2]
-  ; CHECK: indirectbr i8* blockaddress(@instructions.terminators, %defaultdest.2), [label %defaultdest.2]
+  ; CHECK: indirectbr ptr blockaddress(@instructions.terminators, %defaultdest.2), [label %defaultdest.2]
   indirectbr i8* blockaddress(@instructions.terminators, %defaultdest.2), [label %defaultdest.2, label %defaultdest.2]
-  ; CHECK: indirectbr i8* blockaddress(@instructions.terminators, %defaultdest.2), [label %defaultdest.2, label %defaultdest.2]
+  ; CHECK: indirectbr ptr blockaddress(@instructions.terminators, %defaultdest.2), [label %defaultdest.2, label %defaultdest.2]
 
   invoke fastcc void @f.fastcc()
   ; CHECK: invoke fastcc void @f.fastcc()
@@ -915,7 +915,7 @@ catchswitch2:
 catchpad2:
   catchpad within %cs2 [i32* %arg1]
   br label %normal
-  ; CHECK: catchpad within %cs2 [i32* %arg1]
+  ; CHECK: catchpad within %cs2 [ptr %arg1]
   ; CHECK-NEXT: br label %normal
 
 catchswitch3:
@@ -924,7 +924,7 @@ catchswitch3:
 catchpad3:
   catchpad within %cs3 [i32* %arg1, i32* %arg2]
   br label %normal
-  ; CHECK: catchpad within %cs3 [i32* %arg1, i32* %arg2]
+  ; CHECK: catchpad within %cs3 [ptr %arg1, ptr %arg2]
   ; CHECK-NEXT: br label %normal
 
 cleanuppad1:
@@ -1104,17 +1104,17 @@ define void @instructions.aggregateops({ i8, i32 } %up, <{ i8, i32 }> %p,
   %n.ptr = alloca { i8, { i32 } }
 
   getelementptr { i8, i32 }, { i8, i32 }* %up.ptr, i8 0
-  ; CHECK: getelementptr { i8, i32 }, { i8, i32 }* %up.ptr, i8 0
+  ; CHECK: getelementptr { i8, i32 }, ptr %up.ptr, i8 0
   getelementptr <{ i8, i32 }>, <{ i8, i32 }>* %p.ptr, i8 1
-  ; CHECK: getelementptr <{ i8, i32 }>, <{ i8, i32 }>* %p.ptr, i8 1
+  ; CHECK: getelementptr <{ i8, i32 }>, ptr %p.ptr, i8 1
   getelementptr [3 x i8], [3 x i8]* %arr.ptr, i8 2
-  ; CHECK: getelementptr [3 x i8], [3 x i8]* %arr.ptr, i8 2
+  ; CHECK: getelementptr [3 x i8], ptr %arr.ptr, i8 2
   getelementptr { i8, { i32 } }, { i8, { i32 } }* %n.ptr, i32 0, i32 1
-  ; CHECK: getelementptr { i8, { i32 } }, { i8, { i32 } }* %n.ptr, i32 0, i32 1
+  ; CHECK: getelementptr { i8, { i32 } }, ptr %n.ptr, i32 0, i32 1
   getelementptr inbounds { i8, { i32 } }, { i8, { i32 } }* %n.ptr, i32 1, i32 0
-  ; CHECK: getelementptr inbounds { i8, { i32 } }, { i8, { i32 } }* %n.ptr, i32 1, i32 0
+  ; CHECK: getelementptr inbounds { i8, { i32 } }, ptr %n.ptr, i32 1, i32 0
   getelementptr i8, <2 x i8*> %pvec, <2 x i64> %offsets
-  ; CHECK: getelementptr i8, <2 x i8*> %pvec, <2 x i64> %offsets
+  ; CHECK: getelementptr i8, <2 x ptr> %pvec, <2 x i64> %offsets
 
   ret void
 }
@@ -1130,14 +1130,14 @@ define void @instructions.memops(i32** %base) {
   ; CHECK: alloca inalloca i32, i8 4, align 4
 
   load i32*, i32** %base, align 8, !invariant.load !7, !nontemporal !8, !nonnull !7, !dereferenceable !9, !dereferenceable_or_null !9
-  ; CHECK: load i32*, i32** %base, align 8, !invariant.load !7, !nontemporal !8, !nonnull !7, !dereferenceable !9, !dereferenceable_or_null !9
+  ; CHECK: load ptr, ptr %base, align 8, !invariant.load !7, !nontemporal !8, !nonnull !7, !dereferenceable !9, !dereferenceable_or_null !9
   load volatile i32*, i32** %base, align 8, !invariant.load !7, !nontemporal !8, !nonnull !7, !dereferenceable !9, !dereferenceable_or_null !9
-  ; CHECK: load volatile i32*, i32** %base, align 8, !invariant.load !7, !nontemporal !8, !nonnull !7, !dereferenceable !9, !dereferenceable_or_null !9
+  ; CHECK: load volatile ptr, ptr %base, align 8, !invariant.load !7, !nontemporal !8, !nonnull !7, !dereferenceable !9, !dereferenceable_or_null !9
 
   store i32* null, i32** %base, align 4, !nontemporal !8
-  ; CHECK: store i32* null, i32** %base, align 4, !nontemporal !8
+  ; CHECK: store ptr null, ptr %base, align 4, !nontemporal !8
   store volatile i32* null, i32** %base, align 4, !nontemporal !8
-  ; CHECK: store volatile i32* null, i32** %base, align 4, !nontemporal !8
+  ; CHECK: store volatile ptr null, ptr %base, align 4, !nontemporal !8
 
   ret void
 }
@@ -1163,13 +1163,13 @@ define void @instructions.conversions() {
   sitofp i32 -1 to float
   ; CHECK: sitofp i32 -1 to float
   ptrtoint i8* null to i64
-  ; CHECK: ptrtoint i8* null to i64
+  ; CHECK: ptrtoint ptr null to i64
   inttoptr i64 0 to i8*
-  ; CHECK: inttoptr i64 0 to i8*
+  ; CHECK: inttoptr i64 0 to ptr
   bitcast i32 0 to i32
   ; CHECK: bitcast i32 0 to i32
   addrspacecast i32* null to i32 addrspace(1)*
-  ; CHECK: addrspacecast i32* null to i32 addrspace(1)*
+  ; CHECK: addrspacecast ptr null to ptr addrspace(1)
 
   ret void
 }
@@ -1256,16 +1256,16 @@ exit:
   ; CHECK: call void @f.strictfp() #9
 
   call fastcc noalias i32* @f.noalias() noinline
-  ; CHECK: call fastcc noalias i32* @f.noalias() #12
+  ; CHECK: call fastcc noalias ptr @f.noalias() #12
   tail call ghccc nonnull i32* @f.nonnull() minsize
-  ; CHECK: tail call ghccc nonnull i32* @f.nonnull() #7
+  ; CHECK: tail call ghccc nonnull ptr @f.nonnull() #7
 
   ret void
 }
 
 define void @instructions.call_musttail(i8* inalloca %val) {
   musttail call void @f.param.inalloca(i8* inalloca %val)
-  ; CHECK: musttail call void @f.param.inalloca(i8* inalloca(i8) %val)
+  ; CHECK: musttail call void @f.param.inalloca(ptr inalloca(i8) %val)
 
   ret void
 }
@@ -1296,7 +1296,7 @@ catch2:
              cleanup
              ; CHECK: cleanup
              catch i32* null
-             ; CHECK: catch i32* null
+             ; CHECK: catch ptr null
   br label %proceed
 
 catch3:
@@ -1305,9 +1305,9 @@ catch3:
              cleanup
              ; CHECK: cleanup
              catch i32* null
-             ; CHECK: catch i32* null
+             ; CHECK: catch ptr null
              catch i32* null
-             ; CHECK: catch i32* null
+             ; CHECK: catch ptr null
   br label %proceed
 
 catch4:
@@ -1332,16 +1332,16 @@ define void @instructions.va_arg(i8* %v, ...) {
   %ap2 = bitcast i8** %ap to i8*
 
   call void @llvm.va_start(i8* %ap2)
-  ; CHECK: call void @llvm.va_start(i8* %ap2)
+  ; CHECK: call void @llvm.va_start(ptr %ap2)
 
   va_arg i8* %ap2, i32
-  ; CHECK: va_arg i8* %ap2, i32
+  ; CHECK: va_arg ptr %ap2, i32
 
   call void @llvm.va_copy(i8* %v, i8* %ap2)
-  ; CHECK: call void @llvm.va_copy(i8* %v, i8* %ap2)
+  ; CHECK: call void @llvm.va_copy(ptr %v, ptr %ap2)
 
   call void @llvm.va_end(i8* %ap2)
-  ; CHECK: call void @llvm.va_end(i8* %ap2)
+  ; CHECK: call void @llvm.va_end(ptr %ap2)
 
   ret void
 }
@@ -1353,14 +1353,14 @@ declare void @llvm.gcwrite(i8*, i8*, i8**)
 define void @intrinsics.gc() gc "shadow-stack" {
   %ptrloc = alloca i8*
   call void @llvm.gcroot(i8** %ptrloc, i8* null)
-  ; CHECK: call void @llvm.gcroot(i8** %ptrloc, i8* null)
+  ; CHECK: call void @llvm.gcroot(ptr %ptrloc, ptr null)
 
   call i8* @llvm.gcread(i8* null, i8** %ptrloc)
-  ; CHECK: call i8* @llvm.gcread(i8* null, i8** %ptrloc)
+  ; CHECK: call ptr @llvm.gcread(ptr null, ptr %ptrloc)
 
   %ref = alloca i8
   call void @llvm.gcwrite(i8* %ref, i8* null, i8** %ptrloc)
-  ; CHECK: call void @llvm.gcwrite(i8* %ref, i8* null, i8** %ptrloc)
+  ; CHECK: call void @llvm.gcwrite(ptr %ref, ptr null, ptr %ptrloc)
 
   ret void
 }
@@ -1383,9 +1383,9 @@ declare void @llvm.instrprof_increment(i8*, i64, i32, i32)
 !10 = !{!"rax"}
 define void @intrinsics.codegen() {
   call i8* @llvm.returnaddress(i32 1)
-  ; CHECK: call i8* @llvm.returnaddress(i32 1)
+  ; CHECK: call ptr @llvm.returnaddress(i32 1)
   call i8* @llvm.frameaddress(i32 1)
-  ; CHECK: call i8* @llvm.frameaddress.p0i8(i32 1)
+  ; CHECK: call ptr @llvm.frameaddress.p0(i32 1)
 
   call i32 @llvm.read_register.i32(metadata !10)
   ; CHECK: call i32 @llvm.read_register.i32(metadata !10)
@@ -1397,12 +1397,12 @@ define void @intrinsics.codegen() {
   ; CHECK: call void @llvm.write_register.i64(metadata !10, i64 0)
 
   %stack = call i8* @llvm.stacksave()
-  ; CHECK: %stack = call i8* @llvm.stacksave()
+  ; CHECK: %stack = call ptr @llvm.stacksave()
   call void @llvm.stackrestore(i8* %stack)
-  ; CHECK: call void @llvm.stackrestore(i8* %stack)
+  ; CHECK: call void @llvm.stackrestore(ptr %stack)
 
   call void @llvm.prefetch(i8* %stack, i32 0, i32 3, i32 0)
-  ; CHECK: call void @llvm.prefetch.p0i8(i8* %stack, i32 0, i32 3, i32 0)
+  ; CHECK: call void @llvm.prefetch.p0(ptr %stack, i32 0, i32 3, i32 0)
 
   call void @llvm.pcmarker(i32 1)
   ; CHECK: call void @llvm.pcmarker(i32 1)
@@ -1411,10 +1411,10 @@ define void @intrinsics.codegen() {
   ; CHECK: call i64 @llvm.readcyclecounter()
 
   call void @llvm.clear_cache(i8* null, i8* null)
-  ; CHECK: call void @llvm.clear_cache(i8* null, i8* null)
+  ; CHECK: call void @llvm.clear_cache(ptr null, ptr null)
 
   call void @llvm.instrprof_increment(i8* null, i64 0, i32 0, i32 0)
-  ; CHECK: call void @llvm.instrprof_increment(i8* null, i64 0, i32 0, i32 0)
+  ; CHECK: call void @llvm.instrprof_increment(ptr null, i64 0, i32 0, i32 0)
 
   ret void
 }
@@ -1424,7 +1424,7 @@ declare i8* @llvm.localrecover(i8* %func, i8* %fp, i32 %idx)
 define void @intrinsics.localescape() {
   %static.alloca = alloca i32
   call void (...) @llvm.localescape(i32* %static.alloca)
-  ; CHECK: call void (...) @llvm.localescape(i32* %static.alloca)
+  ; CHECK: call void (...) @llvm.localescape(ptr %static.alloca)
 
   call void @intrinsics.localrecover()
 
@@ -1434,7 +1434,7 @@ define void @intrinsics.localrecover() {
   %func = bitcast void ()* @intrinsics.localescape to i8*
   %fp = call i8* @llvm.frameaddress(i32 1)
   call i8* @llvm.localrecover(i8* %func, i8* %fp, i32 0)
-  ; CHECK: call i8* @llvm.localrecover(i8* %func, i8* %fp, i32 0)
+  ; CHECK: call ptr @llvm.localrecover(ptr %func, ptr %fp, i32 0)
 
   ret void
 }
@@ -1626,7 +1626,7 @@ declare void @f.speculatable() speculatable
 ;; Constant Expressions
 
 define i8** @constexpr() {
-  ; CHECK: ret i8** getelementptr inbounds ({ [4 x i8*], [4 x i8*] }, { [4 x i8*], [4 x i8*] }* null, i32 0, inrange i32 1, i32 2)
+  ; CHECK: ret ptr getelementptr inbounds ({ [4 x ptr], [4 x ptr] }, ptr null, i32 0, inrange i32 1, i32 2)
   ret i8** getelementptr inbounds ({ [4 x i8*], [4 x i8*] }, { [4 x i8*], [4 x i8*] }* null, i32 0, inrange i32 1, i32 2)
 }
 

diff  --git a/llvm/test/Bitcode/compatibility-6.0.ll b/llvm/test/Bitcode/compatibility-6.0.ll
index c05b5c7f4c560..09fe08ff10f18 100644
--- a/llvm/test/Bitcode/compatibility-6.0.ll
+++ b/llvm/test/Bitcode/compatibility-6.0.ll
@@ -38,7 +38,7 @@ $comdat.samesize = comdat samesize
 @const.float = constant double 0.0
 ; CHECK: @const.float = constant double 0.0
 @const.null = constant i8* null
-; CHECK: @const.null = constant i8* null
+; CHECK: @const.null = constant ptr null
 %const.struct.type = type { i32, i8 }
 %const.struct.type.packed = type <{ i32, i8 }>
 @const.struct = constant %const.struct.type { i32 -1, i8 undef }
@@ -189,13 +189,13 @@ $comdat2 = comdat any
 @g.used3 = global i8 0
 declare void @g.f1()
 @llvm.used = appending global [1 x i32*] [i32* @g.used1], section "llvm.metadata"
-; CHECK: @llvm.used = appending global [1 x i32*] [i32* @g.used1], section "llvm.metadata"
+; CHECK: @llvm.used = appending global [1 x ptr] [ptr @g.used1], section "llvm.metadata"
 @llvm.compiler.used = appending global [1 x i32*] [i32* @g.used2], section "llvm.metadata"
-; CHECK: @llvm.compiler.used = appending global [1 x i32*] [i32* @g.used2], section "llvm.metadata"
+; CHECK: @llvm.compiler.used = appending global [1 x ptr] [ptr @g.used2], section "llvm.metadata"
 @llvm.global_ctors = appending global [1 x %pri.func.data] [%pri.func.data { i32 0, void ()* @g.f1, i8* @g.used3 }], section "llvm.metadata"
-; CHECK: @llvm.global_ctors = appending global [1 x %pri.func.data] [%pri.func.data { i32 0, void ()* @g.f1, i8* @g.used3 }], section "llvm.metadata"
+; CHECK: @llvm.global_ctors = appending global [1 x %pri.func.data] [%pri.func.data { i32 0, ptr @g.f1, ptr @g.used3 }], section "llvm.metadata"
 @llvm.global_dtors = appending global [1 x %pri.func.data] [%pri.func.data { i32 0, void ()* @g.f1, i8* @g.used3 }], section "llvm.metadata"
-; CHECK: @llvm.global_dtors = appending global [1 x %pri.func.data] [%pri.func.data { i32 0, void ()* @g.f1, i8* @g.used3 }], section "llvm.metadata"
+; CHECK: @llvm.global_dtors = appending global [1 x %pri.func.data] [%pri.func.data { i32 0, ptr @g.f1, ptr @g.used3 }], section "llvm.metadata"
 
 ;; Aliases
 ; Format: @<Name> = [Linkage] [Visibility] [DLLStorageClass] [ThreadLocal]
@@ -203,71 +203,71 @@ declare void @g.f1()
 
 ; Aliases -- Linkage
 @a.private = private alias i32, i32* @g.private
-; CHECK: @a.private = private alias i32, i32* @g.private
+; CHECK: @a.private = private alias i32, ptr @g.private
 @a.internal = internal alias i32, i32* @g.internal
-; CHECK: @a.internal = internal alias i32, i32* @g.internal
+; CHECK: @a.internal = internal alias i32, ptr @g.internal
 @a.linkonce = linkonce alias i32, i32* @g.linkonce
-; CHECK: @a.linkonce = linkonce alias i32, i32* @g.linkonce
+; CHECK: @a.linkonce = linkonce alias i32, ptr @g.linkonce
 @a.weak = weak alias i32, i32* @g.weak
-; CHECK: @a.weak = weak alias i32, i32* @g.weak
+; CHECK: @a.weak = weak alias i32, ptr @g.weak
 @a.linkonce_odr = linkonce_odr alias i32, i32* @g.linkonce_odr
-; CHECK: @a.linkonce_odr = linkonce_odr alias i32, i32* @g.linkonce_odr
+; CHECK: @a.linkonce_odr = linkonce_odr alias i32, ptr @g.linkonce_odr
 @a.weak_odr = weak_odr alias i32, i32* @g.weak_odr
-; CHECK: @a.weak_odr = weak_odr alias i32, i32* @g.weak_odr
+; CHECK: @a.weak_odr = weak_odr alias i32, ptr @g.weak_odr
 @a.external = external alias i32, i32* @g1
-; CHECK: @a.external = alias i32, i32* @g1
+; CHECK: @a.external = alias i32, ptr @g1
 
 ; Aliases -- Visibility
 @a.default = default alias i32, i32* @g.default
-; CHECK: @a.default = alias i32, i32* @g.default
+; CHECK: @a.default = alias i32, ptr @g.default
 @a.hidden = hidden alias i32, i32* @g.hidden
-; CHECK: @a.hidden = hidden alias i32, i32* @g.hidden
+; CHECK: @a.hidden = hidden alias i32, ptr @g.hidden
 @a.protected = protected alias i32, i32* @g.protected
-; CHECK: @a.protected = protected alias i32, i32* @g.protected
+; CHECK: @a.protected = protected alias i32, ptr @g.protected
 
 ; Aliases -- DLLStorageClass
 @a.dlldefault = default alias i32, i32* @g.dlldefault
-; CHECK: @a.dlldefault = alias i32, i32* @g.dlldefault
+; CHECK: @a.dlldefault = alias i32, ptr @g.dlldefault
 @a.dllexport = dllexport alias i32, i32* @g.dllexport
-; CHECK: @a.dllexport = dllexport alias i32, i32* @g.dllexport
+; CHECK: @a.dllexport = dllexport alias i32, ptr @g.dllexport
 
 ; Aliases -- ThreadLocal
 @a.notthreadlocal = alias i32, i32* @g.notthreadlocal
-; CHECK: @a.notthreadlocal = alias i32, i32* @g.notthreadlocal
+; CHECK: @a.notthreadlocal = alias i32, ptr @g.notthreadlocal
 @a.generaldynamic = thread_local alias i32, i32* @g.generaldynamic
-; CHECK: @a.generaldynamic = thread_local alias i32, i32* @g.generaldynamic
+; CHECK: @a.generaldynamic = thread_local alias i32, ptr @g.generaldynamic
 @a.localdynamic = thread_local(localdynamic) alias i32, i32* @g.localdynamic
-; CHECK: @a.localdynamic = thread_local(localdynamic) alias i32, i32* @g.localdynamic
+; CHECK: @a.localdynamic = thread_local(localdynamic) alias i32, ptr @g.localdynamic
 @a.initialexec = thread_local(initialexec) alias i32, i32* @g.initialexec
-; CHECK: @a.initialexec = thread_local(initialexec) alias i32, i32* @g.initialexec
+; CHECK: @a.initialexec = thread_local(initialexec) alias i32, ptr @g.initialexec
 @a.localexec = thread_local(localexec) alias i32, i32* @g.localexec
-; CHECK: @a.localexec = thread_local(localexec) alias i32, i32* @g.localexec
+; CHECK: @a.localexec = thread_local(localexec) alias i32, ptr @g.localexec
 
 ; Aliases -- unnamed_addr and local_unnamed_addr
 @a.unnamed_addr = unnamed_addr alias i32, i32* @g.unnamed_addr
-; CHECK: @a.unnamed_addr = unnamed_addr alias i32, i32* @g.unnamed_addr
+; CHECK: @a.unnamed_addr = unnamed_addr alias i32, ptr @g.unnamed_addr
 @a.local_unnamed_addr = local_unnamed_addr alias i32, i32* @g.local_unnamed_addr
-; CHECK: @a.local_unnamed_addr = local_unnamed_addr alias i32, i32* @g.local_unnamed_addr
+; CHECK: @a.local_unnamed_addr = local_unnamed_addr alias i32, ptr @g.local_unnamed_addr
 
 ;; IFunc
 ; Format @<Name> = [Linkage] [Visibility] ifunc <IFuncTy>,
-;                  <ResolverTy>* @<Resolver>
+;                  ptr @<Resolver>
 
 ; IFunc -- Linkage
 @ifunc.external = external ifunc void (), i8* ()* @ifunc_resolver
-; CHECK: @ifunc.external = ifunc void (), bitcast (i8* ()* @ifunc_resolver to void ()* ()*)
+; CHECK: @ifunc.external = ifunc void (), ptr @ifunc_resolver
 @ifunc.private = private ifunc void (), i8* ()* @ifunc_resolver
-; CHECK: @ifunc.private = private ifunc void (), bitcast (i8* ()* @ifunc_resolver to void ()* ()*)
+; CHECK: @ifunc.private = private ifunc void (), ptr @ifunc_resolver
 @ifunc.internal = internal ifunc void (), i8* ()* @ifunc_resolver
-; CHECK: @ifunc.internal = internal ifunc void (), bitcast (i8* ()* @ifunc_resolver to void ()* ()*)
+; CHECK: @ifunc.internal = internal ifunc void (), ptr @ifunc_resolver
 
 ; IFunc -- Visibility
 @ifunc.default = default ifunc void (), i8* ()* @ifunc_resolver
-; CHECK: @ifunc.default = ifunc void (), bitcast (i8* ()* @ifunc_resolver to void ()* ()*)
+; CHECK: @ifunc.default = ifunc void (), ptr @ifunc_resolver
 @ifunc.hidden = hidden ifunc void (), i8* ()* @ifunc_resolver
-; CHECK: @ifunc.hidden = hidden ifunc void (), bitcast (i8* ()* @ifunc_resolver to void ()* ()*)
+; CHECK: @ifunc.hidden = hidden ifunc void (), ptr @ifunc_resolver
 @ifunc.protected = protected ifunc void (), i8* ()* @ifunc_resolver
-; CHECK: @ifunc.protected = protected ifunc void (), bitcast (i8* ()* @ifunc_resolver to void ()* ()*)
+; CHECK: @ifunc.protected = protected ifunc void (), ptr @ifunc_resolver
 
 define i8* @ifunc_resolver() {
 entry:
@@ -489,23 +489,23 @@ declare zeroext i64 @f.zeroext()
 declare signext i64 @f.signext()
 ; CHECK: declare signext i64 @f.signext()
 declare inreg i32* @f.inreg()
-; CHECK: declare inreg i32* @f.inreg()
+; CHECK: declare inreg ptr @f.inreg()
 declare noalias i32* @f.noalias()
-; CHECK: declare noalias i32* @f.noalias()
+; CHECK: declare noalias ptr @f.noalias()
 declare nonnull i32* @f.nonnull()
-; CHECK: declare nonnull i32* @f.nonnull()
+; CHECK: declare nonnull ptr @f.nonnull()
 declare dereferenceable(4) i32* @f.dereferenceable4()
-; CHECK: declare dereferenceable(4) i32* @f.dereferenceable4()
+; CHECK: declare dereferenceable(4) ptr @f.dereferenceable4()
 declare dereferenceable(8) i32* @f.dereferenceable8()
-; CHECK: declare dereferenceable(8) i32* @f.dereferenceable8()
+; CHECK: declare dereferenceable(8) ptr @f.dereferenceable8()
 declare dereferenceable(16) i32* @f.dereferenceable16()
-; CHECK: declare dereferenceable(16) i32* @f.dereferenceable16()
+; CHECK: declare dereferenceable(16) ptr @f.dereferenceable16()
 declare dereferenceable_or_null(4) i32* @f.dereferenceable4_or_null()
-; CHECK: declare dereferenceable_or_null(4) i32* @f.dereferenceable4_or_null()
+; CHECK: declare dereferenceable_or_null(4) ptr @f.dereferenceable4_or_null()
 declare dereferenceable_or_null(8) i32* @f.dereferenceable8_or_null()
-; CHECK: declare dereferenceable_or_null(8) i32* @f.dereferenceable8_or_null()
+; CHECK: declare dereferenceable_or_null(8) ptr @f.dereferenceable8_or_null()
 declare dereferenceable_or_null(16) i32* @f.dereferenceable16_or_null()
-; CHECK: declare dereferenceable_or_null(16) i32* @f.dereferenceable16_or_null()
+; CHECK: declare dereferenceable_or_null(16) ptr @f.dereferenceable16_or_null()
 
 ; Functions -- Parameter attributes
 declare void @f.param.zeroext(i8 zeroext)
@@ -515,25 +515,25 @@ declare void @f.param.signext(i8 signext)
 declare void @f.param.inreg(i8 inreg)
 ; CHECK: declare void @f.param.inreg(i8 inreg)
 declare void @f.param.byval({ i8, i8 }* byval({ i8, i8 }))
-; CHECK: declare void @f.param.byval({ i8, i8 }* byval({ i8, i8 }))
+; CHECK: declare void @f.param.byval(ptr byval({ i8, i8 }))
 declare void @f.param.inalloca(i8* inalloca)
-; CHECK: declare void @f.param.inalloca(i8* inalloca(i8))
+; CHECK: declare void @f.param.inalloca(ptr inalloca(i8))
 declare void @f.param.sret(i8* sret(i8))
-; CHECK: declare void @f.param.sret(i8* sret(i8))
+; CHECK: declare void @f.param.sret(ptr sret(i8))
 declare void @f.param.noalias(i8* noalias)
-; CHECK: declare void @f.param.noalias(i8* noalias)
+; CHECK: declare void @f.param.noalias(ptr noalias)
 declare void @f.param.nocapture(i8* nocapture)
-; CHECK: declare void @f.param.nocapture(i8* nocapture)
+; CHECK: declare void @f.param.nocapture(ptr nocapture)
 declare void @f.param.nest(i8* nest)
-; CHECK: declare void @f.param.nest(i8* nest)
+; CHECK: declare void @f.param.nest(ptr nest)
 declare i8* @f.param.returned(i8* returned)
-; CHECK: declare i8* @f.param.returned(i8* returned)
+; CHECK: declare ptr @f.param.returned(ptr returned)
 declare void @f.param.nonnull(i8* nonnull)
-; CHECK: declare void @f.param.nonnull(i8* nonnull)
+; CHECK: declare void @f.param.nonnull(ptr nonnull)
 declare void @f.param.dereferenceable(i8* dereferenceable(4))
-; CHECK: declare void @f.param.dereferenceable(i8* dereferenceable(4))
+; CHECK: declare void @f.param.dereferenceable(ptr dereferenceable(4))
 declare void @f.param.dereferenceable_or_null(i8* dereferenceable_or_null(4))
-; CHECK: declare void @f.param.dereferenceable_or_null(i8* dereferenceable_or_null(4))
+; CHECK: declare void @f.param.dereferenceable_or_null(ptr dereferenceable_or_null(4))
 
 ; Functions -- unnamed_addr and local_unnamed_addr
 declare void @f.unnamed_addr() unnamed_addr
@@ -685,7 +685,7 @@ normal:
 declare i32 @f.personality_handler()
 ; CHECK: declare i32 @f.personality_handler()
 define void @f.personality() personality i32 ()* @f.personality_handler {
-; CHECK: define void @f.personality() personality i32 ()* @f.personality_handler
+; CHECK: define void @f.personality() personality ptr @f.personality_handler
   invoke void @llvm.donothing() to label %normal unwind label %exception
 exception:
   %cleanup = landingpad i32 cleanup
@@ -697,43 +697,43 @@ normal:
 ;; Atomic Memory Ordering Constraints
 define void @atomics(i32* %word) {
   %cmpxchg.0 = cmpxchg i32* %word, i32 0, i32 4 monotonic monotonic
-  ; CHECK: %cmpxchg.0 = cmpxchg i32* %word, i32 0, i32 4 monotonic monotonic
+  ; CHECK: %cmpxchg.0 = cmpxchg ptr %word, i32 0, i32 4 monotonic monotonic
   %cmpxchg.1 = cmpxchg i32* %word, i32 0, i32 5 acq_rel monotonic
-  ; CHECK: %cmpxchg.1 = cmpxchg i32* %word, i32 0, i32 5 acq_rel monotonic
+  ; CHECK: %cmpxchg.1 = cmpxchg ptr %word, i32 0, i32 5 acq_rel monotonic
   %cmpxchg.2 = cmpxchg i32* %word, i32 0, i32 6 acquire monotonic
-  ; CHECK: %cmpxchg.2 = cmpxchg i32* %word, i32 0, i32 6 acquire monotonic
+  ; CHECK: %cmpxchg.2 = cmpxchg ptr %word, i32 0, i32 6 acquire monotonic
   %cmpxchg.3 = cmpxchg i32* %word, i32 0, i32 7 release monotonic
-  ; CHECK: %cmpxchg.3 = cmpxchg i32* %word, i32 0, i32 7 release monotonic
+  ; CHECK: %cmpxchg.3 = cmpxchg ptr %word, i32 0, i32 7 release monotonic
   %cmpxchg.4 = cmpxchg i32* %word, i32 0, i32 8 seq_cst monotonic
-  ; CHECK: %cmpxchg.4 = cmpxchg i32* %word, i32 0, i32 8 seq_cst monotonic
+  ; CHECK: %cmpxchg.4 = cmpxchg ptr %word, i32 0, i32 8 seq_cst monotonic
   %cmpxchg.5 = cmpxchg weak i32* %word, i32 0, i32 9 seq_cst monotonic
-  ; CHECK: %cmpxchg.5 = cmpxchg weak i32* %word, i32 0, i32 9 seq_cst monotonic
+  ; CHECK: %cmpxchg.5 = cmpxchg weak ptr %word, i32 0, i32 9 seq_cst monotonic
   %cmpxchg.6 = cmpxchg volatile i32* %word, i32 0, i32 10 seq_cst monotonic
-  ; CHECK: %cmpxchg.6 = cmpxchg volatile i32* %word, i32 0, i32 10 seq_cst monotonic
+  ; CHECK: %cmpxchg.6 = cmpxchg volatile ptr %word, i32 0, i32 10 seq_cst monotonic
   %cmpxchg.7 = cmpxchg weak volatile i32* %word, i32 0, i32 11 syncscope("singlethread") seq_cst monotonic
-  ; CHECK: %cmpxchg.7 = cmpxchg weak volatile i32* %word, i32 0, i32 11 syncscope("singlethread") seq_cst monotonic
+  ; CHECK: %cmpxchg.7 = cmpxchg weak volatile ptr %word, i32 0, i32 11 syncscope("singlethread") seq_cst monotonic
   %atomicrmw.xchg = atomicrmw xchg i32* %word, i32 12 monotonic
-  ; CHECK: %atomicrmw.xchg = atomicrmw xchg i32* %word, i32 12 monotonic
+  ; CHECK: %atomicrmw.xchg = atomicrmw xchg ptr %word, i32 12 monotonic
   %atomicrmw.add = atomicrmw add i32* %word, i32 13 monotonic
-  ; CHECK: %atomicrmw.add = atomicrmw add i32* %word, i32 13 monotonic
+  ; CHECK: %atomicrmw.add = atomicrmw add ptr %word, i32 13 monotonic
   %atomicrmw.sub = atomicrmw sub i32* %word, i32 14 monotonic
-  ; CHECK: %atomicrmw.sub = atomicrmw sub i32* %word, i32 14 monotonic
+  ; CHECK: %atomicrmw.sub = atomicrmw sub ptr %word, i32 14 monotonic
   %atomicrmw.and = atomicrmw and i32* %word, i32 15 monotonic
-  ; CHECK: %atomicrmw.and = atomicrmw and i32* %word, i32 15 monotonic
+  ; CHECK: %atomicrmw.and = atomicrmw and ptr %word, i32 15 monotonic
   %atomicrmw.nand = atomicrmw nand i32* %word, i32 16 monotonic
-  ; CHECK: %atomicrmw.nand = atomicrmw nand i32* %word, i32 16 monotonic
+  ; CHECK: %atomicrmw.nand = atomicrmw nand ptr %word, i32 16 monotonic
   %atomicrmw.or = atomicrmw or i32* %word, i32 17 monotonic
-  ; CHECK: %atomicrmw.or = atomicrmw or i32* %word, i32 17 monotonic
+  ; CHECK: %atomicrmw.or = atomicrmw or ptr %word, i32 17 monotonic
   %atomicrmw.xor = atomicrmw xor i32* %word, i32 18 monotonic
-  ; CHECK: %atomicrmw.xor = atomicrmw xor i32* %word, i32 18 monotonic
+  ; CHECK: %atomicrmw.xor = atomicrmw xor ptr %word, i32 18 monotonic
   %atomicrmw.max = atomicrmw max i32* %word, i32 19 monotonic
-  ; CHECK: %atomicrmw.max = atomicrmw max i32* %word, i32 19 monotonic
+  ; CHECK: %atomicrmw.max = atomicrmw max ptr %word, i32 19 monotonic
   %atomicrmw.min = atomicrmw volatile min i32* %word, i32 20 monotonic
-  ; CHECK: %atomicrmw.min = atomicrmw volatile min i32* %word, i32 20 monotonic
+  ; CHECK: %atomicrmw.min = atomicrmw volatile min ptr %word, i32 20 monotonic
   %atomicrmw.umax = atomicrmw umax i32* %word, i32 21 syncscope("singlethread") monotonic
-  ; CHECK: %atomicrmw.umax = atomicrmw umax i32* %word, i32 21 syncscope("singlethread") monotonic
+  ; CHECK: %atomicrmw.umax = atomicrmw umax ptr %word, i32 21 syncscope("singlethread") monotonic
   %atomicrmw.umin = atomicrmw volatile umin i32* %word, i32 22 syncscope("singlethread") monotonic
-  ; CHECK: %atomicrmw.umin = atomicrmw volatile umin i32* %word, i32 22 syncscope("singlethread") monotonic
+  ; CHECK: %atomicrmw.umin = atomicrmw volatile umin ptr %word, i32 22 syncscope("singlethread") monotonic
   fence acquire
   ; CHECK: fence acquire
   fence release
@@ -744,18 +744,18 @@ define void @atomics(i32* %word) {
   ; CHECK: fence syncscope("singlethread") seq_cst
 
   %ld.1 = load atomic i32, i32* %word monotonic, align 4
-  ; CHECK: %ld.1 = load atomic i32, i32* %word monotonic, align 4
+  ; CHECK: %ld.1 = load atomic i32, ptr %word monotonic, align 4
   %ld.2 = load atomic volatile i32, i32* %word acquire, align 8
-  ; CHECK: %ld.2 = load atomic volatile i32, i32* %word acquire, align 8
+  ; CHECK: %ld.2 = load atomic volatile i32, ptr %word acquire, align 8
   %ld.3 = load atomic volatile i32, i32* %word syncscope("singlethread") seq_cst, align 16
-  ; CHECK: %ld.3 = load atomic volatile i32, i32* %word syncscope("singlethread") seq_cst, align 16
+  ; CHECK: %ld.3 = load atomic volatile i32, ptr %word syncscope("singlethread") seq_cst, align 16
 
   store atomic i32 23, i32* %word monotonic, align 4
-  ; CHECK: store atomic i32 23, i32* %word monotonic, align 4
+  ; CHECK: store atomic i32 23, ptr %word monotonic, align 4
   store atomic volatile i32 24, i32* %word monotonic, align 4
-  ; CHECK: store atomic volatile i32 24, i32* %word monotonic, align 4
+  ; CHECK: store atomic volatile i32 24, ptr %word monotonic, align 4
   store atomic volatile i32 25, i32* %word syncscope("singlethread") monotonic, align 4
-  ; CHECK: store atomic volatile i32 25, i32* %word syncscope("singlethread") monotonic, align 4
+  ; CHECK: store atomic volatile i32 25, ptr %word syncscope("singlethread") monotonic, align 4
   ret void
 }
 
@@ -806,17 +806,17 @@ define void @fastMathFlagsForCalls(float %f, double %d1, <4 x double> %d2) {
 %opaquety = type opaque
 define void @typesystem() {
   %p0 = bitcast i8* null to i32 (i32)*
-  ; CHECK: %p0 = bitcast i8* null to i32 (i32)*
+  ; CHECK: %p0 = bitcast ptr null to ptr
   %p1 = bitcast i8* null to void (i8*)*
-  ; CHECK: %p1 = bitcast i8* null to void (i8*)*
+  ; CHECK: %p1 = bitcast ptr null to ptr
   %p2 = bitcast i8* null to i32 (i8*, ...)*
-  ; CHECK: %p2 = bitcast i8* null to i32 (i8*, ...)*
+  ; CHECK: %p2 = bitcast ptr null to ptr
   %p3 = bitcast i8* null to { i32, i8 } (i8*, ...)*
-  ; CHECK: %p3 = bitcast i8* null to { i32, i8 } (i8*, ...)*
+  ; CHECK: %p3 = bitcast ptr null to ptr
   %p4 = bitcast i8* null to <{ i32, i8 }> (i8*, ...)*
-  ; CHECK: %p4 = bitcast i8* null to <{ i32, i8 }> (i8*, ...)*
+  ; CHECK: %p4 = bitcast ptr null to ptr
   %p5 = bitcast i8* null to <{ i32, i8 }> (<{ i8*, i64 }>*, ...)*
-  ; CHECK: %p5 = bitcast i8* null to <{ i32, i8 }> (<{ i8*, i64 }>*, ...)*
+  ; CHECK: %p5 = bitcast ptr null to ptr
 
   %t0 = alloca i1942652
   ; CHECK: %t0 = alloca i1942652
@@ -835,7 +835,7 @@ define void @typesystem() {
   %t7 = alloca x86_mmx
   ; CHECK: %t7 = alloca x86_mmx
   %t8 = alloca %opaquety*
-  ; CHECK: %t8 = alloca %opaquety*
+  ; CHECK: %t8 = alloca ptr
 
   ret void
 }
@@ -884,9 +884,9 @@ defaultdest.1:
 defaultdest.2:
 
   indirectbr i8* blockaddress(@instructions.terminators, %defaultdest.2), [label %defaultdest.2]
-  ; CHECK: indirectbr i8* blockaddress(@instructions.terminators, %defaultdest.2), [label %defaultdest.2]
+  ; CHECK: indirectbr ptr blockaddress(@instructions.terminators, %defaultdest.2), [label %defaultdest.2]
   indirectbr i8* blockaddress(@instructions.terminators, %defaultdest.2), [label %defaultdest.2, label %defaultdest.2]
-  ; CHECK: indirectbr i8* blockaddress(@instructions.terminators, %defaultdest.2), [label %defaultdest.2, label %defaultdest.2]
+  ; CHECK: indirectbr ptr blockaddress(@instructions.terminators, %defaultdest.2), [label %defaultdest.2, label %defaultdest.2]
 
   invoke fastcc void @f.fastcc()
   ; CHECK: invoke fastcc void @f.fastcc()
@@ -926,7 +926,7 @@ catchswitch2:
 catchpad2:
   catchpad within %cs2 [i32* %arg1]
   br label %normal
-  ; CHECK: catchpad within %cs2 [i32* %arg1]
+  ; CHECK: catchpad within %cs2 [ptr %arg1]
   ; CHECK-NEXT: br label %normal
 
 catchswitch3:
@@ -935,7 +935,7 @@ catchswitch3:
 catchpad3:
   catchpad within %cs3 [i32* %arg1, i32* %arg2]
   br label %normal
-  ; CHECK: catchpad within %cs3 [i32* %arg1, i32* %arg2]
+  ; CHECK: catchpad within %cs3 [ptr %arg1, ptr %arg2]
   ; CHECK-NEXT: br label %normal
 
 cleanuppad1:
@@ -1115,17 +1115,17 @@ define void @instructions.aggregateops({ i8, i32 } %up, <{ i8, i32 }> %p,
   %n.ptr = alloca { i8, { i32 } }
 
   getelementptr { i8, i32 }, { i8, i32 }* %up.ptr, i8 0
-  ; CHECK: getelementptr { i8, i32 }, { i8, i32 }* %up.ptr, i8 0
+  ; CHECK: getelementptr { i8, i32 }, ptr %up.ptr, i8 0
   getelementptr <{ i8, i32 }>, <{ i8, i32 }>* %p.ptr, i8 1
-  ; CHECK: getelementptr <{ i8, i32 }>, <{ i8, i32 }>* %p.ptr, i8 1
+  ; CHECK: getelementptr <{ i8, i32 }>, ptr %p.ptr, i8 1
   getelementptr [3 x i8], [3 x i8]* %arr.ptr, i8 2
-  ; CHECK: getelementptr [3 x i8], [3 x i8]* %arr.ptr, i8 2
+  ; CHECK: getelementptr [3 x i8], ptr %arr.ptr, i8 2
   getelementptr { i8, { i32 } }, { i8, { i32 } }* %n.ptr, i32 0, i32 1
-  ; CHECK: getelementptr { i8, { i32 } }, { i8, { i32 } }* %n.ptr, i32 0, i32 1
+  ; CHECK: getelementptr { i8, { i32 } }, ptr %n.ptr, i32 0, i32 1
   getelementptr inbounds { i8, { i32 } }, { i8, { i32 } }* %n.ptr, i32 1, i32 0
-  ; CHECK: getelementptr inbounds { i8, { i32 } }, { i8, { i32 } }* %n.ptr, i32 1, i32 0
+  ; CHECK: getelementptr inbounds { i8, { i32 } }, ptr %n.ptr, i32 1, i32 0
   getelementptr i8, <2 x i8*> %pvec, <2 x i64> %offsets
-  ; CHECK: getelementptr i8, <2 x i8*> %pvec, <2 x i64> %offsets
+  ; CHECK: getelementptr i8, <2 x ptr> %pvec, <2 x i64> %offsets
 
   ret void
 }
@@ -1141,14 +1141,14 @@ define void @instructions.memops(i32** %base) {
   ; CHECK: alloca inalloca i32, i8 4, align 4
 
   load i32*, i32** %base, align 8, !invariant.load !7, !nontemporal !8, !nonnull !7, !dereferenceable !9, !dereferenceable_or_null !9
-  ; CHECK: load i32*, i32** %base, align 8, !invariant.load !7, !nontemporal !8, !nonnull !7, !dereferenceable !9, !dereferenceable_or_null !9
+  ; CHECK: load ptr, ptr %base, align 8, !invariant.load !7, !nontemporal !8, !nonnull !7, !dereferenceable !9, !dereferenceable_or_null !9
   load volatile i32*, i32** %base, align 8, !invariant.load !7, !nontemporal !8, !nonnull !7, !dereferenceable !9, !dereferenceable_or_null !9
-  ; CHECK: load volatile i32*, i32** %base, align 8, !invariant.load !7, !nontemporal !8, !nonnull !7, !dereferenceable !9, !dereferenceable_or_null !9
+  ; CHECK: load volatile ptr, ptr %base, align 8, !invariant.load !7, !nontemporal !8, !nonnull !7, !dereferenceable !9, !dereferenceable_or_null !9
 
   store i32* null, i32** %base, align 4, !nontemporal !8
-  ; CHECK: store i32* null, i32** %base, align 4, !nontemporal !8
+  ; CHECK: store ptr null, ptr %base, align 4, !nontemporal !8
   store volatile i32* null, i32** %base, align 4, !nontemporal !8
-  ; CHECK: store volatile i32* null, i32** %base, align 4, !nontemporal !8
+  ; CHECK: store volatile ptr null, ptr %base, align 4, !nontemporal !8
 
   ret void
 }
@@ -1174,13 +1174,13 @@ define void @instructions.conversions() {
   sitofp i32 -1 to float
   ; CHECK: sitofp i32 -1 to float
   ptrtoint i8* null to i64
-  ; CHECK: ptrtoint i8* null to i64
+  ; CHECK: ptrtoint ptr null to i64
   inttoptr i64 0 to i8*
-  ; CHECK: inttoptr i64 0 to i8*
+  ; CHECK: inttoptr i64 0 to ptr
   bitcast i32 0 to i32
   ; CHECK: bitcast i32 0 to i32
   addrspacecast i32* null to i32 addrspace(1)*
-  ; CHECK: addrspacecast i32* null to i32 addrspace(1)*
+  ; CHECK: addrspacecast ptr null to ptr addrspace(1)
 
   ret void
 }
@@ -1267,16 +1267,16 @@ exit:
   ; CHECK: call void @f.strictfp() #9
 
   call fastcc noalias i32* @f.noalias() noinline
-  ; CHECK: call fastcc noalias i32* @f.noalias() #12
+  ; CHECK: call fastcc noalias ptr @f.noalias() #12
   tail call ghccc nonnull i32* @f.nonnull() minsize
-  ; CHECK: tail call ghccc nonnull i32* @f.nonnull() #7
+  ; CHECK: tail call ghccc nonnull ptr @f.nonnull() #7
 
   ret void
 }
 
 define void @instructions.call_musttail(i8* inalloca %val) {
   musttail call void @f.param.inalloca(i8* inalloca %val)
-  ; CHECK: musttail call void @f.param.inalloca(i8* inalloca(i8) %val)
+  ; CHECK: musttail call void @f.param.inalloca(ptr inalloca(i8) %val)
 
   ret void
 }
@@ -1307,7 +1307,7 @@ catch2:
              cleanup
              ; CHECK: cleanup
              catch i32* null
-             ; CHECK: catch i32* null
+             ; CHECK: catch ptr null
   br label %proceed
 
 catch3:
@@ -1316,9 +1316,9 @@ catch3:
              cleanup
              ; CHECK: cleanup
              catch i32* null
-             ; CHECK: catch i32* null
+             ; CHECK: catch ptr null
              catch i32* null
-             ; CHECK: catch i32* null
+             ; CHECK: catch ptr null
   br label %proceed
 
 catch4:
@@ -1343,16 +1343,16 @@ define void @instructions.va_arg(i8* %v, ...) {
   %ap2 = bitcast i8** %ap to i8*
 
   call void @llvm.va_start(i8* %ap2)
-  ; CHECK: call void @llvm.va_start(i8* %ap2)
+  ; CHECK: call void @llvm.va_start(ptr %ap2)
 
   va_arg i8* %ap2, i32
-  ; CHECK: va_arg i8* %ap2, i32
+  ; CHECK: va_arg ptr %ap2, i32
 
   call void @llvm.va_copy(i8* %v, i8* %ap2)
-  ; CHECK: call void @llvm.va_copy(i8* %v, i8* %ap2)
+  ; CHECK: call void @llvm.va_copy(ptr %v, ptr %ap2)
 
   call void @llvm.va_end(i8* %ap2)
-  ; CHECK: call void @llvm.va_end(i8* %ap2)
+  ; CHECK: call void @llvm.va_end(ptr %ap2)
 
   ret void
 }
@@ -1364,14 +1364,14 @@ declare void @llvm.gcwrite(i8*, i8*, i8**)
 define void @intrinsics.gc() gc "shadow-stack" {
   %ptrloc = alloca i8*
   call void @llvm.gcroot(i8** %ptrloc, i8* null)
-  ; CHECK: call void @llvm.gcroot(i8** %ptrloc, i8* null)
+  ; CHECK: call void @llvm.gcroot(ptr %ptrloc, ptr null)
 
   call i8* @llvm.gcread(i8* null, i8** %ptrloc)
-  ; CHECK: call i8* @llvm.gcread(i8* null, i8** %ptrloc)
+  ; CHECK: call ptr @llvm.gcread(ptr null, ptr %ptrloc)
 
   %ref = alloca i8
   call void @llvm.gcwrite(i8* %ref, i8* null, i8** %ptrloc)
-  ; CHECK: call void @llvm.gcwrite(i8* %ref, i8* null, i8** %ptrloc)
+  ; CHECK: call void @llvm.gcwrite(ptr %ref, ptr null, ptr %ptrloc)
 
   ret void
 }
@@ -1394,9 +1394,9 @@ declare void @llvm.instrprof_increment(i8*, i64, i32, i32)
 !10 = !{!"rax"}
 define void @intrinsics.codegen() {
   call i8* @llvm.returnaddress(i32 1)
-  ; CHECK: call i8* @llvm.returnaddress(i32 1)
+  ; CHECK: call ptr @llvm.returnaddress(i32 1)
   call i8* @llvm.frameaddress(i32 1)
-  ; CHECK: call i8* @llvm.frameaddress.p0i8(i32 1)
+  ; CHECK: call ptr @llvm.frameaddress.p0(i32 1)
 
   call i32 @llvm.read_register.i32(metadata !10)
   ; CHECK: call i32 @llvm.read_register.i32(metadata !10)
@@ -1408,12 +1408,12 @@ define void @intrinsics.codegen() {
   ; CHECK: call void @llvm.write_register.i64(metadata !10, i64 0)
 
   %stack = call i8* @llvm.stacksave()
-  ; CHECK: %stack = call i8* @llvm.stacksave()
+  ; CHECK: %stack = call ptr @llvm.stacksave()
   call void @llvm.stackrestore(i8* %stack)
-  ; CHECK: call void @llvm.stackrestore(i8* %stack)
+  ; CHECK: call void @llvm.stackrestore(ptr %stack)
 
   call void @llvm.prefetch(i8* %stack, i32 0, i32 3, i32 0)
-  ; CHECK: call void @llvm.prefetch.p0i8(i8* %stack, i32 0, i32 3, i32 0)
+  ; CHECK: call void @llvm.prefetch.p0(ptr %stack, i32 0, i32 3, i32 0)
 
   call void @llvm.pcmarker(i32 1)
   ; CHECK: call void @llvm.pcmarker(i32 1)
@@ -1422,10 +1422,10 @@ define void @intrinsics.codegen() {
   ; CHECK: call i64 @llvm.readcyclecounter()
 
   call void @llvm.clear_cache(i8* null, i8* null)
-  ; CHECK: call void @llvm.clear_cache(i8* null, i8* null)
+  ; CHECK: call void @llvm.clear_cache(ptr null, ptr null)
 
   call void @llvm.instrprof_increment(i8* null, i64 0, i32 0, i32 0)
-  ; CHECK: call void @llvm.instrprof_increment(i8* null, i64 0, i32 0, i32 0)
+  ; CHECK: call void @llvm.instrprof_increment(ptr null, i64 0, i32 0, i32 0)
 
   ret void
 }
@@ -1435,7 +1435,7 @@ declare i8* @llvm.localrecover(i8* %func, i8* %fp, i32 %idx)
 define void @intrinsics.localescape() {
   %static.alloca = alloca i32
   call void (...) @llvm.localescape(i32* %static.alloca)
-  ; CHECK: call void (...) @llvm.localescape(i32* %static.alloca)
+  ; CHECK: call void (...) @llvm.localescape(ptr %static.alloca)
 
   call void @intrinsics.localrecover()
 
@@ -1445,7 +1445,7 @@ define void @intrinsics.localrecover() {
   %func = bitcast void ()* @intrinsics.localescape to i8*
   %fp = call i8* @llvm.frameaddress(i32 1)
   call i8* @llvm.localrecover(i8* %func, i8* %fp, i32 0)
-  ; CHECK: call i8* @llvm.localrecover(i8* %func, i8* %fp, i32 0)
+  ; CHECK: call ptr @llvm.localrecover(ptr %func, ptr %fp, i32 0)
 
   ret void
 }
@@ -1637,7 +1637,7 @@ declare void @f.speculatable() speculatable
 ;; Constant Expressions
 
 define i8** @constexpr() {
-  ; CHECK: ret i8** getelementptr inbounds ({ [4 x i8*], [4 x i8*] }, { [4 x i8*], [4 x i8*] }* null, i32 0, inrange i32 1, i32 2)
+  ; CHECK: ret ptr getelementptr inbounds ({ [4 x ptr], [4 x ptr] }, ptr null, i32 0, inrange i32 1, i32 2)
   ret i8** getelementptr inbounds ({ [4 x i8*], [4 x i8*] }, { [4 x i8*], [4 x i8*] }* null, i32 0, inrange i32 1, i32 2)
 }
 

diff  --git a/llvm/test/Bitcode/constantsTest.3.2.ll b/llvm/test/Bitcode/constantsTest.3.2.ll
index 3dea935dcf2d0..f20b1b20c1544 100644
--- a/llvm/test/Bitcode/constantsTest.3.2.ll
+++ b/llvm/test/Bitcode/constantsTest.3.2.ll
@@ -9,14 +9,14 @@
 @X = global i32 0
 ; CHECK: @Y = global i32 1
 @Y = global i32 1
-; CHECK: @Z = global [2 x i32*] [i32* @X, i32* @Y]
+; CHECK: @Z = global [2 x ptr] [ptr @X, ptr @Y]
 @Z = global [2 x i32*] [i32* @X, i32* @Y]
 
 
 define void @SimpleConstants(i32 %x) {
 entry:
 ; null
-; CHECK: store i32 %x, i32* null
+; CHECK: store i32 %x, ptr null
   store i32 %x, i32* null
  
 ; boolean
@@ -68,7 +68,7 @@ entry:
   %poison = sub nuw i32 0, 1
   
   ;address of basic block
-  ; CHECK-NEXT: %res2 = icmp eq i8* blockaddress(@OtherConstants, %Next), null
+  ; CHECK-NEXT: %res2 = icmp eq ptr blockaddress(@OtherConstants, %Next), null
   %res2 = icmp eq i8* blockaddress(@OtherConstants, %Next), null
   br label %Next
   Next: 
@@ -93,15 +93,15 @@ entry:
   uitofp i32 1 to float
   ; CHECK-NEXT: sitofp i32 -1 to float
   sitofp i32 -1 to float
-  ; CHECK-NEXT: ptrtoint i32* @X to i32
+  ; CHECK-NEXT: ptrtoint ptr @X to i32
   ptrtoint i32* @X to i32
-  ; CHECK-NEXT: inttoptr i8 1 to i8*
+  ; CHECK-NEXT: inttoptr i8 1 to ptr
   inttoptr i8 1 to i8*
   ; CHECK-NEXT: bitcast i32 1 to <2 x i16>
   bitcast i32 1 to <2 x i16>
-  ; CHECK-NEXT: getelementptr i32, i32* @X, i32 0
+  ; CHECK-NEXT: getelementptr i32, ptr @X, i32 0
   getelementptr i32, i32* @X, i32 0
-  ; CHECK-NEXT: getelementptr inbounds i32, i32* @X, i32 0
+  ; CHECK-NEXT: getelementptr inbounds i32, ptr @X, i32 0
   getelementptr inbounds i32, i32* @X, i32 0
   ; CHECK: select i1 true, i32 1, i32 0
   select i1 true ,i32 1, i32 0

diff  --git a/llvm/test/Bitcode/conversionInstructions.3.2.ll b/llvm/test/Bitcode/conversionInstructions.3.2.ll
index ae2d65eb2b617..3f0aa02f395d9 100644
--- a/llvm/test/Bitcode/conversionInstructions.3.2.ll
+++ b/llvm/test/Bitcode/conversionInstructions.3.2.ll
@@ -78,7 +78,7 @@ entry:
 
 define void @ptrtoint(i32* %src){
 entry:
-; CHECK: %res1 = ptrtoint i32* %src to i8
+; CHECK: %res1 = ptrtoint ptr %src to i8
   %res1 = ptrtoint i32* %src to i8
   
   ret void
@@ -86,7 +86,7 @@ entry:
 
 define void @inttoptr(i32 %src){
 entry:
-; CHECK: %res1 = inttoptr i32 %src to i32*
+; CHECK: %res1 = inttoptr i32 %src to ptr
   %res1 = inttoptr i32 %src to i32*
   
   ret void
@@ -97,7 +97,7 @@ entry:
 ; CHECK: %res1 = bitcast i32 %src1 to i32
   %res1 = bitcast i32 %src1 to i32
   
-; CHECK: %res2 = bitcast i32* %src2 to i64*
+; CHECK: %res2 = bitcast ptr %src2 to ptr
   %res2 = bitcast i32* %src2 to i64*
   
   ret void
@@ -105,9 +105,9 @@ entry:
 
 define void @ptrtointInstr(i32* %ptr, <4 x i32*> %vecPtr){
 entry:
-; CHECK: %res1 = ptrtoint i32* %ptr to i8
+; CHECK: %res1 = ptrtoint ptr %ptr to i8
   %res1 = ptrtoint i32* %ptr to i8  
-; CHECK-NEXT: %res2 = ptrtoint <4 x i32*> %vecPtr to <4 x i64>
+; CHECK-NEXT: %res2 = ptrtoint <4 x ptr> %vecPtr to <4 x i64>
   %res2 = ptrtoint <4 x i32*> %vecPtr to <4 x i64>
   
   ret void
@@ -115,9 +115,9 @@ entry:
 
 define void @inttoptrInstr(i32 %x, <4 x i32> %vec){
 entry:
-; CHECK: %res1 = inttoptr i32 %x to i64*
+; CHECK: %res1 = inttoptr i32 %x to ptr
   %res1 = inttoptr i32 %x to i64*
-; CHECK-NEXT: inttoptr <4 x i32> %vec to <4 x i8*>
+; CHECK-NEXT: inttoptr <4 x i32> %vec to <4 x ptr>
   %res2 = inttoptr <4 x i32> %vec to <4 x i8*>
   
   ret void

diff  --git a/llvm/test/Bitcode/dityperefs-3.8.ll b/llvm/test/Bitcode/dityperefs-3.8.ll
index 09225d4eba329..b14bcf7b04a78 100644
--- a/llvm/test/Bitcode/dityperefs-3.8.ll
+++ b/llvm/test/Bitcode/dityperefs-3.8.ll
@@ -19,7 +19,7 @@
 ; CHECK-NEXT: !8 = !DIObjCProperty(name: "P1", type: !1)
 ; CHECK-NEXT: !9 = !DITemplateTypeParameter(type: !1)
 ; CHECK-NEXT: !10 = !DIGlobalVariable(name: "G",{{.*}} type: !1,
-; CHECK-NEXT: !11 = !DITemplateValueParameter(type: !1, value: i32* @G1)
+; CHECK-NEXT: !11 = !DITemplateValueParameter(type: !1, value: ptr @G1)
 ; CHECK-NEXT: !12 = !DIImportedEntity(tag: DW_TAG_imported_module, name: "T2", scope: !0, entity: !1)
 ; CHECK-NEXT: !13 = !DICompositeType(tag: DW_TAG_structure_type, name: "T3", file: !0, elements: !14, identifier: "T3")
 ; CHECK-NEXT: !14 = !{!15}

diff  --git a/llvm/test/Bitcode/dso_location.ll b/llvm/test/Bitcode/dso_location.ll
index 43f96780fbc20..d4375bc83ab5a 100644
--- a/llvm/test/Bitcode/dso_location.ll
+++ b/llvm/test/Bitcode/dso_location.ll
@@ -22,13 +22,13 @@
 ; CHECK: @protected_local_global = protected global i32 0
 
 @local_alias = dso_local alias i32, i32* @local_global
-; CHECK-DAG: @local_alias = dso_local alias i32, i32* @local_global
+; CHECK-DAG: @local_alias = dso_local alias i32, ptr @local_global
 
 @preemptable_alias = dso_preemptable alias i32, i32* @hidden_local_global
-; CHECK-DAG: @preemptable_alias = alias i32, i32* @hidden_local_global
+; CHECK-DAG: @preemptable_alias = alias i32, ptr @hidden_local_global
 
 @preemptable_ifunc = dso_preemptable ifunc void (), void ()* ()* @ifunc_resolver
-; CHECK-DAG: @preemptable_ifunc = ifunc void (), void ()* ()* @ifunc_resolver
+; CHECK-DAG: @preemptable_ifunc = ifunc void (), ptr @ifunc_resolver
 declare dso_local default void @default_local()
 ; CHECK: declare dso_local void @default_local()
 

diff --git a/llvm/test/Bitcode/function-address-space-fwd-decl.ll b/llvm/test/Bitcode/function-address-space-fwd-decl.ll
index f10c2287e4b17..36c07ec3eaaba 100644
--- a/llvm/test/Bitcode/function-address-space-fwd-decl.ll
+++ b/llvm/test/Bitcode/function-address-space-fwd-decl.ll
@@ -4,14 +4,14 @@
 define void @call_named() {
 entry:
   %0 = tail call addrspace(40) i32 @named(i16* null)
-  ; CHECK: %0 = tail call addrspace(40) i32 @named(i16* null)
+  ; CHECK: %0 = tail call addrspace(40) i32 @named(ptr null)
   ret void
 }
 
 define void @call_numbered() {
 entry:
   %0 = tail call addrspace(40) i32 @0(i16* null)
-  ; CHECK: %0 = tail call addrspace(40) i32 @0(i16* null)
+  ; CHECK: %0 = tail call addrspace(40) i32 @0(ptr null)
   ret void
 }
 
@@ -34,6 +34,6 @@ return:
 declare i32 @foo() addrspace(40)
 ; CHECK: declare i32 @foo() addrspace(40)
 declare i32 @named(i16* nocapture) addrspace(40)
-; CHECK: declare i32 @named(i16* nocapture) addrspace(40)
+; CHECK: declare i32 @named(ptr nocapture) addrspace(40)
 declare i32 @0(i16*) addrspace(40)
-; CHECK: declare i32 @0(i16*) addrspace(40)
+; CHECK: declare i32 @0(ptr) addrspace(40)

diff --git a/llvm/test/Bitcode/getelementptr-zero-indices.ll b/llvm/test/Bitcode/getelementptr-zero-indices.ll
index 13b80aa8ee300..2b386901617d6 100644
--- a/llvm/test/Bitcode/getelementptr-zero-indices.ll
+++ b/llvm/test/Bitcode/getelementptr-zero-indices.ll
@@ -1,6 +1,6 @@
 ; RUN: llvm-as < %s | llvm-dis | FileCheck %s
 
-; CHECK: %g = getelementptr i8, i8* %p
+; CHECK: %g = getelementptr i8, ptr %p
 
 define i8* @ptr(i8* %p) {
   %g = getelementptr i8, i8* %p

diff --git a/llvm/test/Bitcode/global-variables.3.2.ll b/llvm/test/Bitcode/global-variables.3.2.ll
index afd9cb14194be..cc951312b4ed1 100644
--- a/llvm/test/Bitcode/global-variables.3.2.ll
+++ b/llvm/test/Bitcode/global-variables.3.2.ll
@@ -27,7 +27,7 @@
 ; CHECK: @default_addrspace.var = global i8 1 
 
 @non_default_addrspace.var = addrspace(1) global i8* undef 
-; CHECK: @non_default_addrspace.var = addrspace(1) global i8* undef 
+; CHECK: @non_default_addrspace.var = addrspace(1) global ptr undef 
 
 @initialexec.var = thread_local(initialexec) global i32 0, align 4
 ; CHECK: @initialexec.var = thread_local(initialexec) global i32 0, align 4

diff --git a/llvm/test/Bitcode/highLevelStructure.3.2.ll b/llvm/test/Bitcode/highLevelStructure.3.2.ll
index 84c5a8ecda6db..d9797b3202cd8 100644
--- a/llvm/test/Bitcode/highLevelStructure.3.2.ll
+++ b/llvm/test/Bitcode/highLevelStructure.3.2.ll
@@ -13,39 +13,39 @@ target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3
 module asm "some assembly"
 
 ; Named Types Test
-; CHECK: %mytype = type { %mytype*, i32 }
+; CHECK: %mytype = type { ptr, i32 }
 %mytype = type { %mytype*, i32 }
 
 ; Aliases Test
 ; CHECK: @glob1 = global i32 1
 @glob1 = global i32 1
-; CHECK: @aliased1 = alias i32, i32* @glob1
+; CHECK: @aliased1 = alias i32, ptr @glob1
 @aliased1 = alias i32, i32* @glob1
-; CHECK-NEXT: @aliased2 = internal alias i32, i32* @glob1
+; CHECK-NEXT: @aliased2 = internal alias i32, ptr @glob1
 @aliased2 = internal alias i32, i32* @glob1
-; CHECK-NEXT: @aliased3 = alias i32, i32* @glob1
+; CHECK-NEXT: @aliased3 = alias i32, ptr @glob1
 @aliased3 = external alias i32, i32* @glob1
-; CHECK-NEXT: @aliased4 = weak alias i32, i32* @glob1
+; CHECK-NEXT: @aliased4 = weak alias i32, ptr @glob1
 @aliased4 = weak alias i32, i32* @glob1
-; CHECK-NEXT: @aliased5 = weak_odr alias i32, i32* @glob1
+; CHECK-NEXT: @aliased5 = weak_odr alias i32, ptr @glob1
 @aliased5 = weak_odr alias i32, i32* @glob1
 
 ;Parameter Attribute Test
 ; CHECK: declare void @ParamAttr1(i8 zeroext)
 declare void @ParamAttr1(i8 zeroext)
-; CHECK: declare void @ParamAttr2(i8* nest)
+; CHECK: declare void @ParamAttr2(ptr nest)
 declare void @ParamAttr2(i8* nest)
-; CHECK: declare void @ParamAttr3(i8* sret(i8))
+; CHECK: declare void @ParamAttr3(ptr sret(i8))
 declare void @ParamAttr3(i8* sret(i8))
 ; CHECK: declare void @ParamAttr4(i8 signext)
 declare void @ParamAttr4(i8 signext)
-; CHECK: declare void @ParamAttr5(i8* inreg)
+; CHECK: declare void @ParamAttr5(ptr inreg)
 declare void @ParamAttr5(i8* inreg)
-; CHECK: declare void @ParamAttr6(i8* byval(i8))
+; CHECK: declare void @ParamAttr6(ptr byval(i8))
 declare void @ParamAttr6(i8* byval(i8))
-; CHECK: declare void @ParamAttr7(i8* noalias)
+; CHECK: declare void @ParamAttr7(ptr noalias)
 declare void @ParamAttr7(i8* noalias)
-; CHECK: declare void @ParamAttr8(i8* nocapture)
+; CHECK: declare void @ParamAttr8(ptr nocapture)
 declare void @ParamAttr8(i8* nocapture)
 ; CHECK: declare void @ParamAttr9{{[(i8* nest noalias nocapture) | (i8* noalias nocapture nest)]}}
 declare void @ParamAttr9(i8* nest noalias nocapture)

diff --git a/llvm/test/Bitcode/inalloca-upgrade.test b/llvm/test/Bitcode/inalloca-upgrade.test
index 20d41365b3601..b7678c34fcac5 100644
--- a/llvm/test/Bitcode/inalloca-upgrade.test
+++ b/llvm/test/Bitcode/inalloca-upgrade.test
@@ -3,5 +3,5 @@ RUN: llvm-dis %p/Inputs/inalloca-upgrade.bc -o - | FileCheck %s
 Make sure we upgrade old-style IntAttribute inalloca records to a
 fully typed version correctly.
 
-CHECK: call void @bar({ i32*, i8 }* inalloca({ i32*, i8 }) %ptr)
-CHECK: invoke void @bar({ i32*, i8 }* inalloca({ i32*, i8 }) %ptr)
+CHECK: call void @bar(ptr inalloca({ ptr, i8 }) %ptr)
+CHECK: invoke void @bar(ptr inalloca({ ptr, i8 }) %ptr)

diff --git a/llvm/test/Bitcode/inalloca.ll b/llvm/test/Bitcode/inalloca.ll
index d69b6c9d24789..294f44e61e437 100644
--- a/llvm/test/Bitcode/inalloca.ll
+++ b/llvm/test/Bitcode/inalloca.ll
@@ -6,7 +6,7 @@
 define void @foo(i32* inalloca(i32) %args) {
   ret void
 }
-; CHECK-LABEL: define void @foo(i32* inalloca(i32) %args)
+; CHECK-LABEL: define void @foo(ptr inalloca(i32) %args)
 
 define void @bar() {
   ; Use the maximum alignment, since we stuff our bit with alignment.
@@ -16,4 +16,4 @@ define void @bar() {
 }
 ; CHECK-LABEL: define void @bar() {
 ; CHECK: %args = alloca inalloca i32, align 4294967296
-; CHECK: call void @foo(i32* inalloca(i32) %args)
+; CHECK: call void @foo(ptr inalloca(i32) %args)

diff --git a/llvm/test/Bitcode/intrinsics-struct-upgrade.ll b/llvm/test/Bitcode/intrinsics-struct-upgrade.ll
index 2d7229a38e743..e7e944ed13121 100644
--- a/llvm/test/Bitcode/intrinsics-struct-upgrade.ll
+++ b/llvm/test/Bitcode/intrinsics-struct-upgrade.ll
@@ -4,8 +4,8 @@
 
 declare %struct.__neon_int8x8x2_t @llvm.aarch64.neon.ld2.v8i8.p0i8(i8*)
 
-; CHECK-LABEL: define %struct.__neon_int8x8x2_t @test_named_struct_return(i8* %A) {
-; CHECK:  %1 = call { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2.v8i8.p0i8(i8* %A)
+; CHECK-LABEL: define %struct.__neon_int8x8x2_t @test_named_struct_return(ptr %A) {
+; CHECK:  %1 = call { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2.v8i8.p0(ptr %A)
 ; CHECK:  %2 = extractvalue { <8 x i8>, <8 x i8> } %1, 0
 ; CHECK:  %3 = insertvalue %struct.__neon_int8x8x2_t poison, <8 x i8> %2, 0
 ; CHECK:  %4 = extractvalue { <8 x i8>, <8 x i8> } %1, 1

diff --git a/llvm/test/Bitcode/intrinsics-with-unnamed-types.ll b/llvm/test/Bitcode/intrinsics-with-unnamed-types.ll
index 02d86ec2c5da9..59567e31caec5 100644
--- a/llvm/test/Bitcode/intrinsics-with-unnamed-types.ll
+++ b/llvm/test/Bitcode/intrinsics-with-unnamed-types.ll
@@ -7,10 +7,10 @@
 %0 = type opaque
 
 ; CHECK-LABEL: @f0(
-; CHECK: %c1 = call %0* @llvm.ssa.copy.p0s_s.0(%0* %arg)
-; CHECK: %c2 = call %1* @llvm.ssa.copy.p0s_s.1(%1* %tmp)
-; CHECK: %c3 = call %0** @llvm.ssa.copy.p0p0s_s.1(%0** %arg2)
-; CHECK: %c4 = call %1** @llvm.ssa.copy.p0p0s_s.0(%1** %tmp2)
+; CHECK: %c1 = call ptr @llvm.ssa.copy.p0(ptr %arg)
+; CHECK: %c2 = call ptr @llvm.ssa.copy.p0(ptr %tmp)
+; CHECK: %c3 = call ptr @llvm.ssa.copy.p0(ptr %arg2)
+; CHECK: %c4 = call ptr @llvm.ssa.copy.p0(ptr %tmp2)
 
 define void @f0(%0* %arg, %1* %tmp, %1** %tmp2, %0** %arg2) {
 bb:

diff --git a/llvm/test/Bitcode/invalid.test b/llvm/test/Bitcode/invalid.test
index bd2e6ceaab34b..cb0677cf64303 100644
--- a/llvm/test/Bitcode/invalid.test
+++ b/llvm/test/Bitcode/invalid.test
@@ -14,14 +14,6 @@ RUN: not llvm-dis -disable-output %p/Inputs/invalid-bitwidth.bc 2>&1 | \
 RUN:   FileCheck --check-prefix=BAD-BITWIDTH %s
 RUN: not llvm-dis -disable-output %p/Inputs/invalid-align.bc  2>&1 | \
 RUN:   FileCheck --check-prefix=BAD-ALIGN %s
-RUN: not llvm-dis -disable-output %p/Inputs/invalid-gep-mismatched-explicit-type.bc 2>&1 | \
-RUN:   FileCheck --check-prefix=MISMATCHED-EXPLICIT-GEP %s
-RUN: not llvm-dis -disable-output %p/Inputs/invalid-load-mismatched-explicit-type.bc 2>&1 | \
-RUN:   FileCheck --check-prefix=MISMATCHED-EXPLICIT-LOAD %s
-RUN: not llvm-dis -disable-output %p/Inputs/invalid-gep-operator-mismatched-explicit-type.bc 2>&1 | \
-RUN:   FileCheck --check-prefix=MISMATCHED-EXPLICIT-GEP-OPERATOR %s
-RUN: not llvm-dis -disable-output %p/Inputs/invalid-call-mismatched-explicit-type.bc 2>&1 | \
-RUN:   FileCheck --check-prefix=MISMATCHED-EXPLICIT-CALL %s
 RUN: not llvm-dis -disable-output %p/Inputs/invalid-call-non-function-explicit-type.bc 2>&1 | \
 RUN:   FileCheck --check-prefix=NON-FUNCTION-EXPLICIT-CALL %s
 RUN: not llvm-dis -disable-output %p/Inputs/invalid-invoke-mismatched-explicit-type.bc 2>&1 | \
@@ -37,12 +29,8 @@ BAD-ABBREV-NUMBER: error: can't skip to bit
 BAD-TYPE-TABLE-FORWARD-REF: Invalid TYPE table: Only named structs can be forward referenced
 BAD-BITWIDTH: error: can't skip to bit
 BAD-ALIGN: Invalid alignment value
-MISMATCHED-EXPLICIT-GEP: Explicit gep type does not match pointee type of pointer operand
-MISMATCHED-EXPLICIT-LOAD: Explicit load/store type does not match pointee type of pointer operand
-MISMATCHED-EXPLICIT-GEP-OPERATOR: Explicit gep operator type does not match pointee type of pointer operand
-MISMATCHED-EXPLICIT-CALL: Explicit call type does not match pointee type of callee operand
 NON-FUNCTION-EXPLICIT-CALL: Explicit call type is not a function type
-MISMATCHED-EXPLICIT-INVOKE: Explicit invoke type does not match pointee type of callee operand
+MISMATCHED-EXPLICIT-INVOKE: Insufficient operands to call
 NON-FUNCTION-EXPLICIT-INVOKE: Explicit invoke type is not a function type
 
 RUN: not llvm-dis -disable-output %p/Inputs/invalid-extractval-array-idx.bc 2>&1 | \
@@ -199,7 +187,7 @@ VECTOR-LENGTH: Invalid vector length
 RUN: not llvm-dis -disable-output %p/Inputs/invalid-alias-type-mismatch.bc 2>&1 | \
 RUN:   FileCheck --check-prefix=ALIAS-TYPE-MISMATCH %s
 
-ALIAS-TYPE-MISMATCH: Alias and aliasee types don't match
+ALIAS-TYPE-MISMATCH: Insufficient function protos
 
 RUN: not llvm-dis -disable-output %p/Inputs/invalid-no-function-block.bc 2>&1 | \
 RUN:   FileCheck --check-prefix=NO-FUNCTION-BLOCK %s

diff --git a/llvm/test/Bitcode/local-linkage-default-visibility.3.4.ll b/llvm/test/Bitcode/local-linkage-default-visibility.3.4.ll
index 15ff5e3a6af87..2204407056e73 100644
--- a/llvm/test/Bitcode/local-linkage-default-visibility.3.4.ll
+++ b/llvm/test/Bitcode/local-linkage-default-visibility.3.4.ll
@@ -26,22 +26,22 @@
 @global = global i32 0
 
 @default.internal.alias = alias internal i32, internal i32* @global
-; CHECK: @default.internal.alias = internal alias i32, i32* @global
+; CHECK: @default.internal.alias = internal alias i32, ptr @global
 
 @hidden.internal.alias = hidden alias internal i32, internal i32* @global
-; CHECK: @hidden.internal.alias = internal alias i32, i32* @global
+; CHECK: @hidden.internal.alias = internal alias i32, ptr @global
 
 @protected.internal.alias = protected alias internal i32, internal i32* @global
-; CHECK: @protected.internal.alias = internal alias i32, i32* @global
+; CHECK: @protected.internal.alias = internal alias i32, ptr @global
 
 @default.private.alias = alias private i32, private i32* @global
-; CHECK: @default.private.alias = private alias i32, i32* @global
+; CHECK: @default.private.alias = private alias i32, ptr @global
 
 @hidden.private.alias = hidden alias private i32, private i32* @global
-; CHECK: @hidden.private.alias = private alias i32, i32* @global
+; CHECK: @hidden.private.alias = private alias i32, ptr @global
 
 @protected.private.alias = protected alias private i32, private i32* @global
-; CHECK: @protected.private.alias = private alias i32, i32* @global
+; CHECK: @protected.private.alias = private alias i32, ptr @global
 
 define internal void @default.internal() {
 ; CHECK: define internal void @default.internal

diff --git a/llvm/test/Bitcode/memInstructions.3.2.ll b/llvm/test/Bitcode/memInstructions.3.2.ll
index 14bfc5dd082ac..756f0dc983a3a 100644
--- a/llvm/test/Bitcode/memInstructions.3.2.ll
+++ b/llvm/test/Bitcode/memInstructions.3.2.ll
@@ -27,52 +27,52 @@ entry:
   %ptr1 = alloca i8
   store i8 2, i8* %ptr1
 
-; CHECK: %res1 = load i8, i8* %ptr1
+; CHECK: %res1 = load i8, ptr %ptr1
   %res1 = load i8, i8* %ptr1
 
-; CHECK-NEXT: %res2 = load volatile i8, i8* %ptr1
+; CHECK-NEXT: %res2 = load volatile i8, ptr %ptr1
   %res2 = load volatile i8, i8* %ptr1
 
-; CHECK-NEXT: %res3 = load i8, i8* %ptr1, align 1
+; CHECK-NEXT: %res3 = load i8, ptr %ptr1, align 1
   %res3 = load i8, i8* %ptr1, align 1
 
-; CHECK-NEXT: %res4 = load volatile i8, i8* %ptr1, align 1
+; CHECK-NEXT: %res4 = load volatile i8, ptr %ptr1, align 1
   %res4 = load volatile i8, i8* %ptr1, align 1
 
-; CHECK-NEXT: %res5 = load i8, i8* %ptr1, align 1, !nontemporal !0
+; CHECK-NEXT: %res5 = load i8, ptr %ptr1, align 1, !nontemporal !0
   %res5 = load i8, i8* %ptr1, !nontemporal !0
 
-; CHECK-NEXT: %res6 = load volatile i8, i8* %ptr1, align 1, !nontemporal !0
+; CHECK-NEXT: %res6 = load volatile i8, ptr %ptr1, align 1, !nontemporal !0
   %res6 = load volatile i8, i8* %ptr1, !nontemporal !0
 
-; CHECK-NEXT: %res7 = load i8, i8* %ptr1, align 1, !nontemporal !0
+; CHECK-NEXT: %res7 = load i8, ptr %ptr1, align 1, !nontemporal !0
   %res7 = load i8, i8* %ptr1, align 1, !nontemporal !0
 
-; CHECK-NEXT: %res8 = load volatile i8, i8* %ptr1, align 1, !nontemporal !0
+; CHECK-NEXT: %res8 = load volatile i8, ptr %ptr1, align 1, !nontemporal !0
   %res8 = load volatile i8, i8* %ptr1, align 1, !nontemporal !0
 
-; CHECK-NEXT: %res9 = load i8, i8* %ptr1, align 1, !invariant.load !1
+; CHECK-NEXT: %res9 = load i8, ptr %ptr1, align 1, !invariant.load !1
   %res9 = load i8, i8* %ptr1, !invariant.load !1
 
-; CHECK-NEXT: %res10 = load volatile i8, i8* %ptr1, align 1, !invariant.load !1
+; CHECK-NEXT: %res10 = load volatile i8, ptr %ptr1, align 1, !invariant.load !1
   %res10 = load volatile i8, i8* %ptr1, !invariant.load !1
 
-; CHECK-NEXT: %res11 = load i8, i8* %ptr1, align 1, !invariant.load !1
+; CHECK-NEXT: %res11 = load i8, ptr %ptr1, align 1, !invariant.load !1
   %res11 = load i8, i8* %ptr1, align 1, !invariant.load !1
 
-; CHECK-NEXT: %res12 = load volatile i8, i8* %ptr1, align 1, !invariant.load !1
+; CHECK-NEXT: %res12 = load volatile i8, ptr %ptr1, align 1, !invariant.load !1
   %res12 = load volatile i8, i8* %ptr1, align 1, !invariant.load !1
 
-; CHECK-NEXT: %res13 = load i8, i8* %ptr1, {{[(!nontemporal !0, !invariant.load !1) | (!invariant.load !1, !nontemporal !0)]}}
+; CHECK-NEXT: %res13 = load i8, ptr %ptr1, {{[(!nontemporal !0, !invariant.load !1) | (!invariant.load !1, !nontemporal !0)]}}
   %res13 = load i8, i8* %ptr1, !nontemporal !0, !invariant.load !1
 
-; CHECK-NEXT: %res14 = load volatile i8, i8* %ptr1, {{[(!nontemporal !0, !invariant.load !1) | (!invariant.load !1, !nontemporal !0)]}}
+; CHECK-NEXT: %res14 = load volatile i8, ptr %ptr1, {{[(!nontemporal !0, !invariant.load !1) | (!invariant.load !1, !nontemporal !0)]}}
   %res14 = load volatile i8, i8* %ptr1, !nontemporal !0, !invariant.load !1
 
-; CHECK-NEXT: %res15 = load i8, i8* %ptr1, align 1, {{[(!nontemporal !0, !invariant.load !1) | (!invariant.load !1, !nontemporal !0)]}}
+; CHECK-NEXT: %res15 = load i8, ptr %ptr1, align 1, {{[(!nontemporal !0, !invariant.load !1) | (!invariant.load !1, !nontemporal !0)]}}
   %res15 = load i8, i8* %ptr1, align 1, !nontemporal !0, !invariant.load !1
 
-; CHECK-NEXT: %res16 = load volatile i8, i8* %ptr1, align 1, {{[(!nontemporal !0, !invariant.load !1) | (!invariant.load !1, !nontemporal !0)]}}
+; CHECK-NEXT: %res16 = load volatile i8, ptr %ptr1, align 1, {{[(!nontemporal !0, !invariant.load !1) | (!invariant.load !1, !nontemporal !0)]}}
   %res16 = load volatile i8, i8* %ptr1, align 1, !nontemporal !0, !invariant.load !1
 
   ret void
@@ -83,52 +83,52 @@ entry:
   %ptr1 = alloca i8
   store i8 2, i8* %ptr1
 
-; CHECK: %res1 = load atomic i8, i8* %ptr1 unordered, align 1
+; CHECK: %res1 = load atomic i8, ptr %ptr1 unordered, align 1
   %res1 = load atomic i8, i8* %ptr1 unordered, align 1
 
-; CHECK-NEXT: %res2 = load atomic i8, i8* %ptr1 monotonic, align 1
+; CHECK-NEXT: %res2 = load atomic i8, ptr %ptr1 monotonic, align 1
   %res2 = load atomic i8, i8* %ptr1 monotonic, align 1
 
-; CHECK-NEXT: %res3 = load atomic i8, i8* %ptr1 acquire, align 1
+; CHECK-NEXT: %res3 = load atomic i8, ptr %ptr1 acquire, align 1
   %res3 = load atomic i8, i8* %ptr1 acquire, align 1
 
-; CHECK-NEXT: %res4 = load atomic i8, i8* %ptr1 seq_cst, align 1
+; CHECK-NEXT: %res4 = load atomic i8, ptr %ptr1 seq_cst, align 1
   %res4 = load atomic i8, i8* %ptr1 seq_cst, align 1
 
-; CHECK-NEXT: %res5 = load atomic volatile i8, i8* %ptr1 unordered, align 1
+; CHECK-NEXT: %res5 = load atomic volatile i8, ptr %ptr1 unordered, align 1
   %res5 = load atomic volatile i8, i8* %ptr1 unordered, align 1
 
-; CHECK-NEXT: %res6 = load atomic volatile i8, i8* %ptr1 monotonic, align 1
+; CHECK-NEXT: %res6 = load atomic volatile i8, ptr %ptr1 monotonic, align 1
   %res6 = load atomic volatile i8, i8* %ptr1 monotonic, align 1
 
-; CHECK-NEXT: %res7 = load atomic volatile i8, i8* %ptr1 acquire, align 1
+; CHECK-NEXT: %res7 = load atomic volatile i8, ptr %ptr1 acquire, align 1
   %res7 = load atomic volatile i8, i8* %ptr1 acquire, align 1
 
-; CHECK-NEXT: %res8 = load atomic volatile i8, i8* %ptr1 seq_cst, align 1
+; CHECK-NEXT: %res8 = load atomic volatile i8, ptr %ptr1 seq_cst, align 1
   %res8 = load atomic volatile i8, i8* %ptr1 seq_cst, align 1
 
-; CHECK-NEXT: %res9 = load atomic i8, i8* %ptr1 syncscope("singlethread") unordered, align 1
+; CHECK-NEXT: %res9 = load atomic i8, ptr %ptr1 syncscope("singlethread") unordered, align 1
   %res9 = load atomic i8, i8* %ptr1 syncscope("singlethread") unordered, align 1
 
-; CHECK-NEXT: %res10 = load atomic i8, i8* %ptr1 syncscope("singlethread") monotonic, align 1
+; CHECK-NEXT: %res10 = load atomic i8, ptr %ptr1 syncscope("singlethread") monotonic, align 1
   %res10 = load atomic i8, i8* %ptr1 syncscope("singlethread") monotonic, align 1
 
-; CHECK-NEXT: %res11 = load atomic i8, i8* %ptr1 syncscope("singlethread") acquire, align 1
+; CHECK-NEXT: %res11 = load atomic i8, ptr %ptr1 syncscope("singlethread") acquire, align 1
   %res11 = load atomic i8, i8* %ptr1 syncscope("singlethread") acquire, align 1
 
-; CHECK-NEXT: %res12 = load atomic i8, i8* %ptr1 syncscope("singlethread") seq_cst, align 1
+; CHECK-NEXT: %res12 = load atomic i8, ptr %ptr1 syncscope("singlethread") seq_cst, align 1
   %res12 = load atomic i8, i8* %ptr1 syncscope("singlethread") seq_cst, align 1
 
-; CHECK-NEXT: %res13 = load atomic volatile i8, i8* %ptr1 syncscope("singlethread") unordered, align 1
+; CHECK-NEXT: %res13 = load atomic volatile i8, ptr %ptr1 syncscope("singlethread") unordered, align 1
   %res13 = load atomic volatile i8, i8* %ptr1 syncscope("singlethread") unordered, align 1
 
-; CHECK-NEXT: %res14 = load atomic volatile i8, i8* %ptr1 syncscope("singlethread") monotonic, align 1
+; CHECK-NEXT: %res14 = load atomic volatile i8, ptr %ptr1 syncscope("singlethread") monotonic, align 1
   %res14 = load atomic volatile i8, i8* %ptr1 syncscope("singlethread") monotonic, align 1
 
-; CHECK-NEXT: %res15 = load atomic volatile i8, i8* %ptr1 syncscope("singlethread") acquire, align 1
+; CHECK-NEXT: %res15 = load atomic volatile i8, ptr %ptr1 syncscope("singlethread") acquire, align 1
   %res15 = load atomic volatile i8, i8* %ptr1 syncscope("singlethread") acquire, align 1
 
-; CHECK-NEXT: %res16 = load atomic volatile i8, i8* %ptr1 syncscope("singlethread") seq_cst, align 1
+; CHECK-NEXT: %res16 = load atomic volatile i8, ptr %ptr1 syncscope("singlethread") seq_cst, align 1
   %res16 = load atomic volatile i8, i8* %ptr1 syncscope("singlethread") seq_cst, align 1
 
   ret void
@@ -138,28 +138,28 @@ define void @store(){
 entry:
   %ptr1 = alloca i8
 
-; CHECK: store i8 2, i8* %ptr1
+; CHECK: store i8 2, ptr %ptr1
   store i8 2, i8* %ptr1
 
-; CHECK-NEXT: store volatile i8 2, i8* %ptr1
+; CHECK-NEXT: store volatile i8 2, ptr %ptr1
   store volatile i8 2, i8* %ptr1
 
-; CHECK-NEXT: store i8 2, i8* %ptr1, align 1
+; CHECK-NEXT: store i8 2, ptr %ptr1, align 1
   store i8 2, i8* %ptr1, align 1
 
-; CHECK-NEXT: store volatile i8 2, i8* %ptr1, align 1
+; CHECK-NEXT: store volatile i8 2, ptr %ptr1, align 1
   store volatile i8 2, i8* %ptr1, align 1
 
-; CHECK-NEXT: store i8 2, i8* %ptr1, align 1, !nontemporal !0
+; CHECK-NEXT: store i8 2, ptr %ptr1, align 1, !nontemporal !0
   store i8 2, i8* %ptr1, !nontemporal !0
 
-; CHECK-NEXT: store volatile i8 2, i8* %ptr1, align 1, !nontemporal !0
+; CHECK-NEXT: store volatile i8 2, ptr %ptr1, align 1, !nontemporal !0
   store volatile i8 2, i8* %ptr1, !nontemporal !0
 
-; CHECK-NEXT: store i8 2, i8* %ptr1, align 1, !nontemporal !0
+; CHECK-NEXT: store i8 2, ptr %ptr1, align 1, !nontemporal !0
   store i8 2, i8* %ptr1, align 1, !nontemporal !0
 
-; CHECK-NEXT: store volatile i8 2, i8* %ptr1, align 1, !nontemporal !0
+; CHECK-NEXT: store volatile i8 2, ptr %ptr1, align 1, !nontemporal !0
   store volatile i8 2, i8* %ptr1, align 1, !nontemporal !0
 
   ret void
@@ -169,52 +169,52 @@ define void @storeAtomic(){
 entry:
   %ptr1 = alloca i8
 
-; CHECK: store atomic i8 2, i8* %ptr1 unordered, align 1
+; CHECK: store atomic i8 2, ptr %ptr1 unordered, align 1
   store atomic i8 2, i8* %ptr1 unordered, align 1
 
-; CHECK-NEXT: store atomic i8 2, i8* %ptr1 monotonic, align 1
+; CHECK-NEXT: store atomic i8 2, ptr %ptr1 monotonic, align 1
   store atomic i8 2, i8* %ptr1 monotonic, align 1
 
-; CHECK-NEXT: store atomic i8 2, i8* %ptr1 release, align 1
+; CHECK-NEXT: store atomic i8 2, ptr %ptr1 release, align 1
   store atomic i8 2, i8* %ptr1 release, align 1
 
-; CHECK-NEXT: store atomic i8 2, i8* %ptr1 seq_cst, align 1
+; CHECK-NEXT: store atomic i8 2, ptr %ptr1 seq_cst, align 1
   store atomic i8 2, i8* %ptr1 seq_cst, align 1
 
-; CHECK-NEXT: store atomic volatile i8 2, i8* %ptr1 unordered, align 1
+; CHECK-NEXT: store atomic volatile i8 2, ptr %ptr1 unordered, align 1
   store atomic volatile i8 2, i8* %ptr1 unordered, align 1
 
-; CHECK-NEXT: store atomic volatile i8 2, i8* %ptr1 monotonic, align 1
+; CHECK-NEXT: store atomic volatile i8 2, ptr %ptr1 monotonic, align 1
   store atomic volatile i8 2, i8* %ptr1 monotonic, align 1
 
-; CHECK-NEXT: store atomic volatile i8 2, i8* %ptr1 release, align 1
+; CHECK-NEXT: store atomic volatile i8 2, ptr %ptr1 release, align 1
   store atomic volatile i8 2, i8* %ptr1 release, align 1
 
-; CHECK-NEXT: store atomic volatile i8 2, i8* %ptr1 seq_cst, align 1
+; CHECK-NEXT: store atomic volatile i8 2, ptr %ptr1 seq_cst, align 1
   store atomic volatile i8 2, i8* %ptr1 seq_cst, align 1
 
-; CHECK-NEXT: store atomic i8 2, i8* %ptr1 syncscope("singlethread") unordered, align 1
+; CHECK-NEXT: store atomic i8 2, ptr %ptr1 syncscope("singlethread") unordered, align 1
   store atomic i8 2, i8* %ptr1 syncscope("singlethread") unordered, align 1
 
-; CHECK-NEXT: store atomic i8 2, i8* %ptr1 syncscope("singlethread") monotonic, align 1
+; CHECK-NEXT: store atomic i8 2, ptr %ptr1 syncscope("singlethread") monotonic, align 1
   store atomic i8 2, i8* %ptr1 syncscope("singlethread") monotonic, align 1
 
-; CHECK-NEXT: store atomic i8 2, i8* %ptr1 syncscope("singlethread") release, align 1
+; CHECK-NEXT: store atomic i8 2, ptr %ptr1 syncscope("singlethread") release, align 1
   store atomic i8 2, i8* %ptr1 syncscope("singlethread") release, align 1
 
-; CHECK-NEXT: store atomic i8 2, i8* %ptr1 syncscope("singlethread") seq_cst, align 1
+; CHECK-NEXT: store atomic i8 2, ptr %ptr1 syncscope("singlethread") seq_cst, align 1
   store atomic i8 2, i8* %ptr1 syncscope("singlethread") seq_cst, align 1
 
-; CHECK-NEXT: store atomic volatile i8 2, i8* %ptr1 syncscope("singlethread") unordered, align 1
+; CHECK-NEXT: store atomic volatile i8 2, ptr %ptr1 syncscope("singlethread") unordered, align 1
   store atomic volatile i8 2, i8* %ptr1 syncscope("singlethread") unordered, align 1
 
-; CHECK-NEXT: store atomic volatile i8 2, i8* %ptr1 syncscope("singlethread") monotonic, align 1
+; CHECK-NEXT: store atomic volatile i8 2, ptr %ptr1 syncscope("singlethread") monotonic, align 1
   store atomic volatile i8 2, i8* %ptr1 syncscope("singlethread") monotonic, align 1
 
-; CHECK-NEXT: store atomic volatile i8 2, i8* %ptr1 syncscope("singlethread") release, align 1
+; CHECK-NEXT: store atomic volatile i8 2, ptr %ptr1 syncscope("singlethread") release, align 1
   store atomic volatile i8 2, i8* %ptr1 syncscope("singlethread") release, align 1
 
-; CHECK-NEXT: store atomic volatile i8 2, i8* %ptr1 syncscope("singlethread") seq_cst, align 1
+; CHECK-NEXT: store atomic volatile i8 2, ptr %ptr1 syncscope("singlethread") seq_cst, align 1
   store atomic volatile i8 2, i8* %ptr1 syncscope("singlethread") seq_cst, align 1
 
   ret void
@@ -222,89 +222,89 @@ entry:
 
 define void @cmpxchg(i32* %ptr,i32 %cmp,i32 %new){
 entry:
-  ;cmpxchg [volatile] <ty>* <pointer>, <ty> <cmp>, <ty> <new> [singlethread] <ordering>
+  ;cmpxchg [volatile] ptr <pointer>, <ty> <cmp>, <ty> <new> [singlethread] <ordering>
 
-; CHECK: [[TMP:%[a-z0-9]+]] = cmpxchg i32* %ptr, i32 %cmp, i32 %new monotonic monotonic
+; CHECK: [[TMP:%[a-z0-9]+]] = cmpxchg ptr %ptr, i32 %cmp, i32 %new monotonic monotonic
 ; CHECK-NEXT: %res1 = extractvalue { i32, i1 } [[TMP]], 0
   %res1 = cmpxchg i32* %ptr, i32 %cmp, i32 %new monotonic monotonic
 
-; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new monotonic monotonic
+; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg volatile ptr %ptr, i32 %cmp, i32 %new monotonic monotonic
 ; CHECK-NEXT: %res2 = extractvalue { i32, i1 } [[TMP]], 0
   %res2 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new monotonic monotonic
 
-; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") monotonic monotonic
+; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg ptr %ptr, i32 %cmp, i32 %new syncscope("singlethread") monotonic monotonic
 ; CHECK-NEXT: %res3 = extractvalue { i32, i1 } [[TMP]], 0
   %res3 = cmpxchg i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") monotonic monotonic
 
-; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") monotonic monotonic
+; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg volatile ptr %ptr, i32 %cmp, i32 %new syncscope("singlethread") monotonic monotonic
 ; CHECK-NEXT: %res4 = extractvalue { i32, i1 } [[TMP]], 0
   %res4 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") monotonic monotonic
 
 
-; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg i32* %ptr, i32 %cmp, i32 %new acquire acquire
+; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg ptr %ptr, i32 %cmp, i32 %new acquire acquire
 ; CHECK-NEXT: %res5 = extractvalue { i32, i1 } [[TMP]], 0
   %res5 = cmpxchg i32* %ptr, i32 %cmp, i32 %new acquire acquire
 
-; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new acquire acquire
+; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg volatile ptr %ptr, i32 %cmp, i32 %new acquire acquire
 ; CHECK-NEXT: %res6 = extractvalue { i32, i1 } [[TMP]], 0
   %res6 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new acquire acquire
 
-; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") acquire acquire
+; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg ptr %ptr, i32 %cmp, i32 %new syncscope("singlethread") acquire acquire
 ; CHECK-NEXT: %res7 = extractvalue { i32, i1 } [[TMP]], 0
   %res7 = cmpxchg i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") acquire acquire
 
-; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") acquire acquire
+; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg volatile ptr %ptr, i32 %cmp, i32 %new syncscope("singlethread") acquire acquire
 ; CHECK-NEXT: %res8 = extractvalue { i32, i1 } [[TMP]], 0
   %res8 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") acquire acquire
 
 
-; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg i32* %ptr, i32 %cmp, i32 %new release monotonic
+; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg ptr %ptr, i32 %cmp, i32 %new release monotonic
 ; CHECK-NEXT: %res9 = extractvalue { i32, i1 } [[TMP]], 0
   %res9 = cmpxchg i32* %ptr, i32 %cmp, i32 %new release monotonic
 
-; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new release monotonic
+; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg volatile ptr %ptr, i32 %cmp, i32 %new release monotonic
 ; CHECK-NEXT: %res10 = extractvalue { i32, i1 } [[TMP]], 0
   %res10 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new release monotonic
 
-; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") release monotonic
+; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg ptr %ptr, i32 %cmp, i32 %new syncscope("singlethread") release monotonic
 ; CHECK-NEXT: %res11 = extractvalue { i32, i1 } [[TMP]], 0
   %res11 = cmpxchg i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") release monotonic
 
-; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") release monotonic
+; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg volatile ptr %ptr, i32 %cmp, i32 %new syncscope("singlethread") release monotonic
 ; CHECK-NEXT: %res12 = extractvalue { i32, i1 } [[TMP]], 0
   %res12 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") release monotonic
 
 
-; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg i32* %ptr, i32 %cmp, i32 %new acq_rel acquire
+; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg ptr %ptr, i32 %cmp, i32 %new acq_rel acquire
 ; CHECK-NEXT: %res13 = extractvalue { i32, i1 } [[TMP]], 0
   %res13 = cmpxchg i32* %ptr, i32 %cmp, i32 %new acq_rel acquire
 
-; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new acq_rel acquire
+; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg volatile ptr %ptr, i32 %cmp, i32 %new acq_rel acquire
 ; CHECK-NEXT: %res14 = extractvalue { i32, i1 } [[TMP]], 0
   %res14 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new acq_rel acquire
 
-; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") acq_rel acquire
+; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg ptr %ptr, i32 %cmp, i32 %new syncscope("singlethread") acq_rel acquire
 ; CHECK-NEXT: %res15 = extractvalue { i32, i1 } [[TMP]], 0
   %res15 = cmpxchg i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") acq_rel acquire
 
-; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") acq_rel acquire
+; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg volatile ptr %ptr, i32 %cmp, i32 %new syncscope("singlethread") acq_rel acquire
 ; CHECK-NEXT: %res16 = extractvalue { i32, i1 } [[TMP]], 0
   %res16 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") acq_rel acquire
 
 
-; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg i32* %ptr, i32 %cmp, i32 %new seq_cst seq_cst
+; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg ptr %ptr, i32 %cmp, i32 %new seq_cst seq_cst
 ; CHECK-NEXT: %res17 = extractvalue { i32, i1 } [[TMP]], 0
   %res17 = cmpxchg i32* %ptr, i32 %cmp, i32 %new seq_cst seq_cst
 
-; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new seq_cst seq_cst
+; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg volatile ptr %ptr, i32 %cmp, i32 %new seq_cst seq_cst
 ; CHECK-NEXT: %res18 = extractvalue { i32, i1 } [[TMP]], 0
   %res18 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new seq_cst seq_cst
 
-; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") seq_cst seq_cst
+; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg ptr %ptr, i32 %cmp, i32 %new syncscope("singlethread") seq_cst seq_cst
 ; CHECK-NEXT: %res19 = extractvalue { i32, i1 } [[TMP]], 0
   %res19 = cmpxchg i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") seq_cst seq_cst
 
-; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") seq_cst seq_cst
+; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg volatile ptr %ptr, i32 %cmp, i32 %new syncscope("singlethread") seq_cst seq_cst
 ; CHECK-NEXT: %res20 = extractvalue { i32, i1 } [[TMP]], 0
   %res20 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new syncscope("singlethread") seq_cst seq_cst
 
@@ -313,13 +313,13 @@ entry:
 
 define void @getelementptr({i8, i8}, {i8, i8}* %s, <4 x i8*> %ptrs, <4 x i64> %offsets ){
 entry:
-; CHECK: %res1 = getelementptr { i8, i8 }, { i8, i8 }* %s, i32 1, i32 1
+; CHECK: %res1 = getelementptr { i8, i8 }, ptr %s, i32 1, i32 1
   %res1 = getelementptr {i8, i8}, {i8, i8}* %s, i32 1, i32 1
 
-; CHECK-NEXT: %res2 = getelementptr inbounds { i8, i8 }, { i8, i8 }* %s, i32 1, i32 1
+; CHECK-NEXT: %res2 = getelementptr inbounds { i8, i8 }, ptr %s, i32 1, i32 1
   %res2 = getelementptr inbounds {i8, i8}, {i8, i8}* %s, i32 1, i32 1
 
-; CHECK-NEXT: %res3 = getelementptr i8, <4 x i8*> %ptrs, <4 x i64> %offsets
+; CHECK-NEXT: %res3 = getelementptr i8, <4 x ptr> %ptrs, <4 x i64> %offsets
   %res3 = getelementptr i8, <4 x i8*> %ptrs, <4 x i64> %offsets
 
   ret void

diff  --git a/llvm/test/Bitcode/metadata-2.ll b/llvm/test/Bitcode/metadata-2.ll
index 040087a9a4e02..1375563fd6815 100644
--- a/llvm/test/Bitcode/metadata-2.ll
+++ b/llvm/test/Bitcode/metadata-2.ll
@@ -1,5 +1,4 @@
 ; RUN: llvm-as < %s | llvm-dis -disable-output
-; RUN: verify-uselistorder < %s
 	%0 = type { %object.ModuleInfo.__vtbl*, i8*, %"byte[]", %1, %"ClassInfo[]", i32, void ()*, void ()*, void ()*, i8*, void ()* }		; type %0
 	%1 = type { i64, %object.ModuleInfo* }		; type %1
 	%2 = type { i32, void ()*, i8* }		; type %2
@@ -19,12 +18,12 @@
 	%object.TypeInfo = type { %object.TypeInfo.__vtbl*, i8* }
 	%object.TypeInfo.__vtbl = type { %object.ClassInfo*, %"byte[]" (%object.Object*)*, i64 (%object.Object*)*, i32 (%object.Object*, %object.Object*)*, i32 (%object.Object*, %object.Object*)*, i64 (%object.TypeInfo*, i8*)*, i32 (%object.TypeInfo*, i8*, i8*)*, i32 (%object.TypeInfo*, i8*, i8*)*, i64 (%object.TypeInfo*)*, void (%object.TypeInfo*, i8*, i8*)*, %object.TypeInfo* (%object.TypeInfo*)*, %"byte[]" (%object.TypeInfo*)*, i32 (%object.TypeInfo*)*, %"OffsetTypeInfo[]" (%object.TypeInfo*)* }
 	%"void*[]" = type { i64, i8** }
- at _D10ModuleInfo6__vtblZ = external constant %object.ModuleInfo.__vtbl		; <%object.ModuleInfo.__vtbl*> [#uses=1]
- at .str = internal constant [20 x i8] c"tango.core.BitManip\00"		; <[20 x i8]*> [#uses=1]
- at _D5tango4core8BitManip8__ModuleZ = global %0 { %object.ModuleInfo.__vtbl* @_D10ModuleInfo6__vtblZ, i8* null, %"byte[]" { i64 19, i8* getelementptr ([20 x i8], [20 x i8]* @.str, i32 0, i32 0) }, %1 zeroinitializer, %"ClassInfo[]" zeroinitializer, i32 4, void ()* null, void ()* null, void ()* null, i8* null, void ()* null }		; <%0*> [#uses=1]
- at _D5tango4core8BitManip11__moduleRefZ = internal global %ModuleReference { %ModuleReference* null, %object.ModuleInfo* bitcast (%0* @_D5tango4core8BitManip8__ModuleZ to %object.ModuleInfo*) }		; <%ModuleReference*> [#uses=2]
- at _Dmodule_ref = external global %ModuleReference*		; <%ModuleReference**> [#uses=2]
- at llvm.global_ctors = appending constant [1 x %2] [%2 { i32 65535, void ()* @_D5tango4core8BitManip16__moduleinfoCtorZ, i8* null }]		; <[1 x %2]*> [#uses=0]
+ at _D10ModuleInfo6__vtblZ = external constant %object.ModuleInfo.__vtbl		; <ptr> [#uses=1]
+ at .str = internal constant [20 x i8] c"tango.core.BitManip\00"		; <ptr> [#uses=1]
+ at _D5tango4core8BitManip8__ModuleZ = global %0 { %object.ModuleInfo.__vtbl* @_D10ModuleInfo6__vtblZ, i8* null, %"byte[]" { i64 19, i8* getelementptr ([20 x i8], [20 x i8]* @.str, i32 0, i32 0) }, %1 zeroinitializer, %"ClassInfo[]" zeroinitializer, i32 4, void ()* null, void ()* null, void ()* null, i8* null, void ()* null }		; <ptr> [#uses=1]
+ at _D5tango4core8BitManip11__moduleRefZ = internal global %ModuleReference { %ModuleReference* null, %object.ModuleInfo* bitcast (%0* @_D5tango4core8BitManip8__ModuleZ to %object.ModuleInfo*) }		; <ptr> [#uses=2]
+ at _Dmodule_ref = external global %ModuleReference*		; <ptr> [#uses=2]
+ at llvm.global_ctors = appending constant [1 x %2] [%2 { i32 65535, void ()* @_D5tango4core8BitManip16__moduleinfoCtorZ, i8* null }]		; <ptr> [#uses=0]
 
 define fastcc i32 @_D5tango4core8BitManip6popcntFkZi(i32 %x_arg) nounwind readnone {
 entry:
@@ -77,7 +76,7 @@ entry:
 
 define internal void @_D5tango4core8BitManip16__moduleinfoCtorZ() nounwind {
 moduleinfoCtorEntry:
-	%current = load %ModuleReference*, %ModuleReference** @_Dmodule_ref		; <%ModuleReference*> [#uses=1]
+	%current = load %ModuleReference*, %ModuleReference** @_Dmodule_ref		; <ptr> [#uses=1]
 	store %ModuleReference* %current, %ModuleReference** getelementptr (%ModuleReference, %ModuleReference* @_D5tango4core8BitManip11__moduleRefZ, i32 0, i32 0)
 	store %ModuleReference* @_D5tango4core8BitManip11__moduleRefZ, %ModuleReference** @_Dmodule_ref
 	ret void

diff  --git a/llvm/test/Bitcode/metadata.3.5.ll b/llvm/test/Bitcode/metadata.3.5.ll
index ae7b83a947464..ca09b756dd64c 100644
--- a/llvm/test/Bitcode/metadata.3.5.ll
+++ b/llvm/test/Bitcode/metadata.3.5.ll
@@ -18,7 +18,7 @@ declare void @llvm.bar(metadata)
 
 @global = global i32 0
 
-; CHECK: !0 = !{!1, !2, i32* @global, null}
+; CHECK: !0 = !{!1, !2, ptr @global, null}
 ; CHECK: !1 = !{!2, null}
 ; CHECK: !2 = !{}
 !0 = metadata !{metadata !1, metadata !2, i32* @global, null}

diff  --git a/llvm/test/Bitcode/miscInstructions.3.2.ll b/llvm/test/Bitcode/miscInstructions.3.2.ll
index 6b8995107264f..a1fb3663d0adb 100644
--- a/llvm/test/Bitcode/miscInstructions.3.2.ll
+++ b/llvm/test/Bitcode/miscInstructions.3.2.ll
@@ -14,21 +14,21 @@ entry:
 }
 
 ; CHECK-LABEL: define void @landingpadInstr1
-; CHECK-SAME: personality i32 (...)* @__gxx_personality_v0
+; CHECK-SAME: personality ptr @__gxx_personality_v0
 define void @landingpadInstr1(i1 %cond1, <2 x i1> %cond2, <2 x i8> %x1, <2 x i8> %x2){
 entry:
-; CHECK: %res = landingpad { i8*, i32 }
+; CHECK: %res = landingpad { ptr, i32 }
   %res = landingpad { i8*, i32 } personality i32 (...)* @__gxx_personality_v0 
-; CHECK: catch i8** @_ZTIi
+; CHECK: catch ptr @_ZTIi
   catch i8** @_ZTIi
   ret void
 }
 
 ; CHECK-LABEL: define void @landingpadInstr2
-; CHECK-SAME: personality i32 (...)* @__gxx_personality_v0
+; CHECK-SAME: personality ptr @__gxx_personality_v0
 define void @landingpadInstr2(i1 %cond1, <2 x i1> %cond2, <2 x i8> %x1, <2 x i8> %x2){
 entry:
-; CHECK: %res = landingpad { i8*, i32 }
+; CHECK: %res = landingpad { ptr, i32 }
   %res = landingpad { i8*, i32 } personality i32 (...)* @__gxx_personality_v0
 ; CHECK: cleanup
   cleanup
@@ -36,14 +36,14 @@ entry:
 }
 
 ; CHECK-LABEL: define void @landingpadInstr3
-; CHECK-SAME: personality i32 (...)* @__gxx_personality_v0
+; CHECK-SAME: personality ptr @__gxx_personality_v0
 define void @landingpadInstr3(i1 %cond1, <2 x i1> %cond2, <2 x i8> %x1, <2 x i8> %x2){
 entry:
-; CHECK: %res = landingpad { i8*, i32 }
+; CHECK: %res = landingpad { ptr, i32 }
   %res = landingpad { i8*, i32 } personality i32 (...)* @__gxx_personality_v0
-; CHECK: catch i8** @_ZTIi
+; CHECK: catch ptr @_ZTIi
   catch i8** @_ZTIi
-; CHECK: filter [1 x i8**] [i8** @_ZTId]
+; CHECK: filter [1 x ptr] [ptr @_ZTId]
   filter [1 x i8**] [i8** @_ZTId]
   ret void
 }
@@ -102,7 +102,7 @@ entry:
 ; CHECK-NEXT: %res10 = icmp sle i32 %x1, %x2
   %res10 = icmp sle i32 %x1, %x2
   
-; CHECK-NEXT: %res11 = icmp eq i32* %ptr1, %ptr2
+; CHECK-NEXT: %res11 = icmp eq ptr %ptr1, %ptr2
   %res11 = icmp eq i32* %ptr1, %ptr2
   
 ; CHECK-NEXT: %res12 = icmp eq <2 x i32> %vec1, %vec2
@@ -179,7 +179,7 @@ entry:
 ; CHECK-NEXT: %res2 = tail call i32 @test(i32 %x)
   %res2 = tail call i32 @test(i32 %x)
   
-; CHECK-NEXT: %res3 = call i32 (i8*, ...) @printf(i8* %msg, i32 12, i8 42)
+; CHECK-NEXT: %res3 = call i32 (ptr, ...) @printf(ptr %msg, i32 12, i8 42)
   %res3 = call i32 (i8*, ...) @printf(i8* %msg, i32 12, i8 42)
   
   ret void

diff  --git a/llvm/test/Bitcode/nocfivalue.ll b/llvm/test/Bitcode/nocfivalue.ll
index 153486c29c5c5..c689a160407de 100644
--- a/llvm/test/Bitcode/nocfivalue.ll
+++ b/llvm/test/Bitcode/nocfivalue.ll
@@ -1,11 +1,11 @@
 ; RUN: llvm-as < %s | llvm-dis | FileCheck %s
 ; RUN: verify-uselistorder %s
 
-; CHECK: @a = global [4 x void ()*] [void ()* no_cfi @f1, void ()* @f1, void ()* @f2, void ()* no_cfi @f2]
+; CHECK: @a = global [4 x ptr] [ptr no_cfi @f1, ptr @f1, ptr @f2, ptr no_cfi @f2]
 @a = global [4 x void ()*] [void ()* no_cfi @f1, void ()* @f1, void ()* @f2, void ()* no_cfi @f2]
-; CHECK: @b = constant void ()* no_cfi @f3
+; CHECK: @b = constant ptr no_cfi @f3
 @b = constant void ()* no_cfi @f3
-; CHECK: @c = constant void ()* @f3
+; CHECK: @c = constant ptr @f3
 @c = constant void ()* @f3
 
 ; CHECK: declare void @f1()
@@ -35,7 +35,7 @@ declare void @f5()
 
 define void @g() {
   %n = alloca void ()*, align 8
-  ; CHECK: store void ()* no_cfi @f5, void ()** %n, align 8
+  ; CHECK: store ptr no_cfi @f5, ptr %n, align 8
   store void ()* no_cfi @f5, void ()** %n, align 8
   %1 = load void ()*, void ()** %n
   call void %1()

diff  --git a/llvm/test/Bitcode/objectsize-upgrade-7.0.ll b/llvm/test/Bitcode/objectsize-upgrade-7.0.ll
index 7ed6f15f6839a..5390fc91f2b26 100644
--- a/llvm/test/Bitcode/objectsize-upgrade-7.0.ll
+++ b/llvm/test/Bitcode/objectsize-upgrade-7.0.ll
@@ -4,9 +4,9 @@
 
 define void @callit(i8* %ptr) {
   %sz = call i64 @llvm.objectsize.i64.p0i8(i8* %ptr, i1 false, i1 true)
-  ; CHECK: %sz = call i64 @llvm.objectsize.i64.p0i8(i8* %ptr, i1 false, i1 true, i1 false)
+  ; CHECK: %sz = call i64 @llvm.objectsize.i64.p0(ptr %ptr, i1 false, i1 true, i1 false)
   ret void
 }
 
 declare i64 @llvm.objectsize.i64.p0i8(i8*, i1, i1)
-; CHECK: declare i64 @llvm.objectsize.i64.p0i8(i8*, i1 immarg, i1 immarg, i1 immarg)
+; CHECK: declare i64 @llvm.objectsize.i64.p0(ptr, i1 immarg, i1 immarg, i1 immarg)

diff  --git a/llvm/test/Bitcode/old-aliases.ll b/llvm/test/Bitcode/old-aliases.ll
index 1bcc4306477c9..a9db8ccd45f60 100644
--- a/llvm/test/Bitcode/old-aliases.ll
+++ b/llvm/test/Bitcode/old-aliases.ll
@@ -11,13 +11,13 @@
 ; CHECK: @v2 = global [1 x i32] zeroinitializer
 
 @v3 = alias i16, bitcast (i32* @v1 to i16*)
-; CHECK: @v3 = alias i16, bitcast (i32* @v1 to i16*)
+; CHECK: @v3 = alias i16, ptr @v1
 
 @v4 = alias i32, getelementptr ([1 x i32], [1 x i32]* @v2, i32 0, i32 0)
-; CHECK: @v4 = alias i32, getelementptr inbounds ([1 x i32], [1 x i32]* @v2, i32 0, i32 0)
+; CHECK: @v4 = alias i32, ptr @v2
 
 @v5 = alias i32, i32 addrspace(2)* addrspacecast (i32 addrspace(0)* @v1 to i32 addrspace(2)*)
-; CHECK: @v5 = alias i32, addrspacecast (i32* @v1 to i32 addrspace(2)*)
+; CHECK: @v5 = alias i32, addrspacecast (ptr @v1 to ptr addrspace(2))
 
 @v6 = alias i16, i16* @v3
-; CHECK: @v6 = alias i16, i16* @v3
+; CHECK: @v6 = alias i16, ptr @v3

diff  --git a/llvm/test/Bitcode/select.ll b/llvm/test/Bitcode/select.ll
index 666d2960fb5f5..7e62361822a87 100644
--- a/llvm/test/Bitcode/select.ll
+++ b/llvm/test/Bitcode/select.ll
@@ -14,5 +14,5 @@ define <2 x float> @f() {
 }
 
 ; CHECK: define <2 x float> @f() {
-; CHECK:   ret <2 x float> select (i1 ptrtoint (<2 x float> ()* @f to i1), <2 x float> <float 1.000000e+00, float 0.000000e+00>, <2 x float> zeroinitializer)
+; CHECK:   ret <2 x float> select (i1 ptrtoint (ptr @f to i1), <2 x float> <float 1.000000e+00, float 0.000000e+00>, <2 x float> zeroinitializer)
 ; CHECK: }

diff  --git a/llvm/test/Bitcode/standardCIntrinsic.3.2.ll b/llvm/test/Bitcode/standardCIntrinsic.3.2.ll
index d556682786d75..34ac6c49202de 100644
--- a/llvm/test/Bitcode/standardCIntrinsic.3.2.ll
+++ b/llvm/test/Bitcode/standardCIntrinsic.3.2.ll
@@ -7,7 +7,7 @@
 define void @memcpyintrinsic(i8* %dest, i8* %src, i32 %len) {
 entry:
 
-; CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 %dest, i8* align 1 %src, i32 %len, i1 true)
+; CHECK: call void @llvm.memcpy.p0.p0.i32(ptr align 1 %dest, ptr align 1 %src, i32 %len, i1 true)
   call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* %src, i32 %len, i1 true)
   
   ret void

diff  --git a/llvm/test/Bitcode/terminatorInstructions.3.2.ll b/llvm/test/Bitcode/terminatorInstructions.3.2.ll
index ba0f5ade2cc1a..6c8b4947e5be0 100644
--- a/llvm/test/Bitcode/terminatorInstructions.3.2.ll
+++ b/llvm/test/Bitcode/terminatorInstructions.3.2.ll
@@ -27,7 +27,7 @@ entry:
 
 define i32 @indirectbr(i8* %Addr){
 entry:
-; CHECK: indirectbr i8* %Addr, [label %bb1, label %bb2]
+; CHECK: indirectbr ptr %Addr, [label %bb1, label %bb2]
   indirectbr i8* %Addr, [ label %bb1, label %bb2 ]
   
   bb1:

diff  --git a/llvm/test/Bitcode/thinlto-function-summary.ll b/llvm/test/Bitcode/thinlto-function-summary.ll
index 68636ed192a19..e51cf10491083 100644
--- a/llvm/test/Bitcode/thinlto-function-summary.ll
+++ b/llvm/test/Bitcode/thinlto-function-summary.ll
@@ -60,8 +60,8 @@ entry:
 ; entries are committed.
 ; Check an anonymous function as well, since in that case only the alias
 ; ends up in the value symbol table and having a summary.
- at f = alias void (), void ()* @0   ; <void ()*> [#uses=0]
- at h = external global void ()*     ; <void ()*> [#uses=0]
+ at f = alias void (), void ()* @0   ; <ptr> [#uses=0]
+ at h = external global void ()*     ; <ptr> [#uses=0]
 
 define internal void @0() nounwind {
 entry:

diff  --git a/llvm/test/Bitcode/thinlto-summary-local-5.0.ll b/llvm/test/Bitcode/thinlto-summary-local-5.0.ll
index 7b58f3ec31bde..08b7e8a57037e 100644
--- a/llvm/test/Bitcode/thinlto-summary-local-5.0.ll
+++ b/llvm/test/Bitcode/thinlto-summary-local-5.0.ll
@@ -14,7 +14,7 @@ define void @foo() {
 ;CHECK-DAG: @bar = global i32 0
 
 @baz = alias i32, i32* @bar
-;CHECK-DAG: @baz = alias i32, i32* @bar
+;CHECK-DAG: @baz = alias i32, ptr @bar
 
 ;BCAN: <SOURCE_FILENAME
 ;BCAN-NEXT: <GLOBALVAR {{.*}} op7=0/>

diff  --git a/llvm/test/Bitcode/upgrade-aarch64-ldstxr.ll b/llvm/test/Bitcode/upgrade-aarch64-ldstxr.ll
index adc39fb392793..cd5f32ec68e4e 100644
--- a/llvm/test/Bitcode/upgrade-aarch64-ldstxr.ll
+++ b/llvm/test/Bitcode/upgrade-aarch64-ldstxr.ll
@@ -1,14 +1,14 @@
 ; RUN: llvm-dis < %S/upgrade-aarch64-ldstxr.bc | FileCheck %s
 
 define void @f(i32* %p) {
-; CHECK: call i64 @llvm.aarch64.ldxr.p0i32(i32* elementtype(i32)
+; CHECK: call i64 @llvm.aarch64.ldxr.p0(ptr elementtype(i32)
   %a = call i64 @llvm.aarch64.ldxr.p0i32(i32* %p)
-; CHECK: call i32 @llvm.aarch64.stxr.p0i32(i64 0, i32* elementtype(i32)
+; CHECK: call i32 @llvm.aarch64.stxr.p0(i64 0, ptr elementtype(i32)
   %c = call i32 @llvm.aarch64.stxr.p0i32(i64 0, i32* %p)
 
-; CHECK: call i64 @llvm.aarch64.ldaxr.p0i32(i32* elementtype(i32)
+; CHECK: call i64 @llvm.aarch64.ldaxr.p0(ptr elementtype(i32)
   %a2 = call i64 @llvm.aarch64.ldaxr.p0i32(i32* %p)
-; CHECK: call i32 @llvm.aarch64.stlxr.p0i32(i64 0, i32* elementtype(i32)
+; CHECK: call i32 @llvm.aarch64.stlxr.p0(i64 0, ptr elementtype(i32)
   %c2 = call i32 @llvm.aarch64.stlxr.p0i32(i64 0, i32* %p)
   ret void
 }

diff  --git a/llvm/test/Bitcode/upgrade-aarch64-sve-intrinsics.ll b/llvm/test/Bitcode/upgrade-aarch64-sve-intrinsics.ll
index 234151aa8c36d..a2d171c173089 100644
--- a/llvm/test/Bitcode/upgrade-aarch64-sve-intrinsics.ll
+++ b/llvm/test/Bitcode/upgrade-aarch64-sve-intrinsics.ll
@@ -1,7 +1,7 @@
 ; RUN: llvm-as %s -o - | llvm-dis - | FileCheck %s
 
 define <vscale x 32 x i8> @ld2.nxv32i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr) {
-; CHECK:  %1 = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld2.sret.nxv16i8(<vscale x 16 x i1> %Pg, i8* %base_ptr)
+; CHECK:  %1 = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld2.sret.nxv16i8(<vscale x 16 x i1> %Pg, ptr %base_ptr)
 ; CHECK-NEXT:  %2 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 0
 ; CHECK-NEXT:  %3 = call <vscale x 32 x i8> @llvm.vector.insert.nxv32i8.nxv16i8(<vscale x 32 x i8> poison, <vscale x 16 x i8> %2, i64 0)
 ; CHECK-NEXT:  %4 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 1
@@ -12,7 +12,7 @@ ret <vscale x 32 x i8> %res
 }
 
 define <vscale x 48 x i8> @ld3.nxv48i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr) {
-; CHECK:  %1 = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld3.sret.nxv16i8(<vscale x 16 x i1> %Pg, i8* %base_ptr)
+; CHECK:  %1 = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld3.sret.nxv16i8(<vscale x 16 x i1> %Pg, ptr %base_ptr)
 ; CHECK-NEXT:  %2 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 0
 ; CHECK-NEXT:  %3 = call <vscale x 48 x i8> @llvm.vector.insert.nxv48i8.nxv16i8(<vscale x 48 x i8> poison, <vscale x 16 x i8> %2, i64 0)
 ; CHECK-NEXT:  %4 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 1
@@ -25,7 +25,7 @@ ret <vscale x 48 x i8> %res
 }
 
 define <vscale x 64 x i8> @ld4.nxv64i8_lower_bound(<vscale x 16 x i1> %Pg, i8 *%base_ptr) {
-; CHECK:  %1 = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld4.sret.nxv16i8(<vscale x 16 x i1> %Pg, i8* %base_ptr)
+; CHECK:  %1 = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld4.sret.nxv16i8(<vscale x 16 x i1> %Pg, ptr %base_ptr)
 ; CHECK-NEXT:  %2 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 0
 ; CHECK-NEXT:  %3 = call <vscale x 64 x i8> @llvm.vector.insert.nxv64i8.nxv16i8(<vscale x 64 x i8> poison, <vscale x 16 x i8> %2, i64 0)
 ; CHECK-NEXT:  %4 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 1
@@ -44,7 +44,7 @@ ret <vscale x 64 x i8> %res
 ; ldN intrinsic name without any element type
 define <vscale x 32 x i8> @ld2.nxv32i8_no_eltty(<vscale x 16 x i1> %Pg, i8 *%base_ptr) {
 ; CHECK-LABEL:  @ld2.nxv32i8_no_eltty
-; CHECK:  %1 = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld2.sret.nxv16i8(<vscale x 16 x i1> %Pg, i8* %base_ptr)
+; CHECK:  %1 = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld2.sret.nxv16i8(<vscale x 16 x i1> %Pg, ptr %base_ptr)
 ; CHECK-NEXT:  %2 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 0
 ; CHECK-NEXT:  %3 = call <vscale x 32 x i8> @llvm.vector.insert.nxv32i8.nxv16i8(<vscale x 32 x i8> poison, <vscale x 16 x i8> %2, i64 0)
 ; CHECK-NEXT:  %4 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 1
@@ -57,7 +57,7 @@ ret <vscale x 32 x i8> %res
 ; ldN instrinsic name with only output type
 define <vscale x 32 x i8> @ld2.nxv32i8_no_predty_pty(<vscale x 16 x i1> %Pg, i8 *%base_ptr) {
 ; CHECK-LABEL:  @ld2.nxv32i8_no_predty_pty
-; CHECK:  %1 = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld2.sret.nxv16i8(<vscale x 16 x i1> %Pg, i8* %base_ptr)
+; CHECK:  %1 = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld2.sret.nxv16i8(<vscale x 16 x i1> %Pg, ptr %base_ptr)
 ; CHECK-NEXT:  %2 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 0
 ; CHECK-NEXT:  %3 = call <vscale x 32 x i8> @llvm.vector.insert.nxv32i8.nxv16i8(<vscale x 32 x i8> poison, <vscale x 16 x i8> %2, i64 0)
 ; CHECK-NEXT:  %4 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 1

diff  --git a/llvm/test/Bitcode/upgrade-annotation.ll b/llvm/test/Bitcode/upgrade-annotation.ll
index 87ea9773c7242..0240d45d04194 100644
--- a/llvm/test/Bitcode/upgrade-annotation.ll
+++ b/llvm/test/Bitcode/upgrade-annotation.ll
@@ -1,19 +1,16 @@
 ; Test upgrade of llvm.annotation intrinsics.
 ;
 ; RUN: llvm-as < %s | llvm-dis | FileCheck %s
-; RUN: llvm-dis --opaque-pointers=0 < %s.bc | FileCheck %s --check-prefix=TYPED
-; RUN: llvm-dis --opaque-pointers=1 < %s.bc | FileCheck %s
+; RUN: llvm-dis < %s.bc | FileCheck %s
 
 
-; TYPED: define i32 @f(i32 [[ARG0:%.*]], i8* [[ARG1:%.*]], i8* [[ARG2:%.*]], i32 [[ARG3:%.*]])
+; TYPED: define i32 @f(i32 [[ARG0:%.*]], ptr [[ARG1:%.*]], ptr [[ARG2:%.*]], i32 [[ARG3:%.*]])
 ; CHECK: define i32 @f(i32 [[ARG0:%.*]], ptr [[ARG1:%.*]], ptr [[ARG2:%.*]], i32 [[ARG3:%.*]])
 define i32 @f(i32 %arg0, ptr %arg1, ptr %arg2, i32 %arg3) {
   %result = call i32 @llvm.annotation.i32(i32 %arg0, ptr %arg1, ptr %arg2, i32 %arg3)
-  ; TYPED: [[RESULT:%.*]] = call i32 @llvm.annotation.i32.p0i8(i32 [[ARG0]], i8* [[ARG1]], i8* [[ARG2]], i32 [[ARG3]])
   ; CHECK: [[RESULT:%.*]] = call i32 @llvm.annotation.i32.p0(i32 [[ARG0]], ptr [[ARG1]], ptr [[ARG2]], i32 [[ARG3]])
   ret i32 %result
 }
 
 declare i32 @llvm.annotation.i32(i32, i8*, ptr, i32)
-; TYPED: declare i32 @llvm.annotation.i32.p0i8(i32, i8*, i8*, i32)
 ; CHECK: declare i32 @llvm.annotation.i32.p0(i32, ptr, ptr, i32)

diff  --git a/llvm/test/Bitcode/upgrade-arc-attachedcall-bundle.ll b/llvm/test/Bitcode/upgrade-arc-attachedcall-bundle.ll
index 7b012abf80850..635c8dd449bec 100644
--- a/llvm/test/Bitcode/upgrade-arc-attachedcall-bundle.ll
+++ b/llvm/test/Bitcode/upgrade-arc-attachedcall-bundle.ll
@@ -4,17 +4,17 @@
 ; RUN: verify-uselistorder %s.bc
 
 define i8* @invalid() {
-; CHECK-LABEL: define i8* @invalid() {
-; CHECK-NEXT:   %tmp0 = call i8* @foo(){{$}}
-; CHECK-NEXT:   ret i8* %tmp0
+; CHECK-LABEL: define ptr @invalid() {
+; CHECK-NEXT:   %tmp0 = call ptr @foo(){{$}}
+; CHECK-NEXT:   ret ptr %tmp0
   %tmp0 = call i8* @foo() [ "clang.arc.attachedcall"() ]
   ret i8* %tmp0
 }
 
 define i8* @valid() {
-; CHECK-LABEL: define i8* @valid() {
-; CHECK-NEXT:   %tmp0 = call i8* @foo() [ "clang.arc.attachedcall"(i8* (i8*)* @llvm.objc.retainAutoreleasedReturnValue) ]
-; CHECK-NEXT:   ret i8* %tmp0
+; CHECK-LABEL: define ptr @valid() {
+; CHECK-NEXT:   %tmp0 = call ptr @foo() [ "clang.arc.attachedcall"(ptr @llvm.objc.retainAutoreleasedReturnValue) ]
+; CHECK-NEXT:   ret ptr %tmp0
   %tmp0 = call i8* @foo() [ "clang.arc.attachedcall"(i8* (i8*)* @llvm.objc.retainAutoreleasedReturnValue) ]
   ret i8* %tmp0
 }

diff  --git a/llvm/test/Bitcode/upgrade-arc-runtime-calls-bitcast.ll b/llvm/test/Bitcode/upgrade-arc-runtime-calls-bitcast.ll
index deac12d36df8d..944b98288634d 100644
--- a/llvm/test/Bitcode/upgrade-arc-runtime-calls-bitcast.ll
+++ b/llvm/test/Bitcode/upgrade-arc-runtime-calls-bitcast.ll
@@ -2,8 +2,8 @@ target triple = "arm64-apple-ios7.0"
 
 ; RUN: llvm-dis < %S/upgrade-arc-runtime-calls-bitcast.bc | FileCheck %s
 
-; CHECK: tail call i8* @objc_retain(i32 1)
-; CHECK: tail call i8* @objc_storeStrong(
+; CHECK: tail call ptr @objc_retain(i32 1)
+; CHECK: tail call ptr @objc_storeStrong(
 
 define void @testRuntimeCalls(i8* %a, i8** %b) {
   %v6 = tail call i8* @objc_retain(i32 1)

diff  --git a/llvm/test/Bitcode/upgrade-arc-runtime-calls.ll b/llvm/test/Bitcode/upgrade-arc-runtime-calls.ll
index d42c776ddc518..19f25f98953fa 100644
--- a/llvm/test/Bitcode/upgrade-arc-runtime-calls.ll
+++ b/llvm/test/Bitcode/upgrade-arc-runtime-calls.ll
@@ -55,71 +55,68 @@ unwindBlock:
 // Check that auto-upgrader converts function calls to intrinsic calls. Note that
 // the auto-upgrader doesn't touch invoke instructions.
 
-// ARC: define void @testRuntimeCalls(i8* %[[A:.*]], i8** %[[B:.*]], i8** %[[C:.*]], i32* %[[D:.*]], i32** %[[E:.*]]) personality
-// ARC: %[[V0:.*]] = tail call i8* @llvm.objc.autorelease(i8* %[[A]])
-// ARC-NEXT: tail call void @llvm.objc.autoreleasePoolPop(i8* %[[A]])
-// ARC-NEXT: %[[V1:.*]] = tail call i8* @llvm.objc.autoreleasePoolPush()
-// ARC-NEXT: %[[V2:.*]] = tail call i8* @llvm.objc.autoreleaseReturnValue(i8* %[[A]])
-// ARC-NEXT: tail call void @llvm.objc.copyWeak(i8** %[[B]], i8** %[[C]])
-// ARC-NEXT: tail call void @llvm.objc.destroyWeak(i8** %[[B]])
-// ARC-NEXT: %[[V100:.*]] = bitcast i32** %[[E]] to i8**
-// ARC-NEXT: %[[V101:.*]] = bitcast i32* %[[D]] to i8*
-// ARC-NEXT: %[[V102:.*]] = tail call i8* @llvm.objc.initWeak(i8** %[[V100]], i8* %[[V101]])
-// ARC-NEXT: %[[V103:.*]] = bitcast i8* %[[V102]] to i32*
-// ARC-NEXT: %[[V4:.*]] = tail call i8* @llvm.objc.loadWeak(i8** %[[B]])
-// ARC-NEXT: %[[V5:.*]] = tail call i8* @llvm.objc.loadWeakRetained(i8** %[[B]])
-// ARC-NEXT: tail call void @llvm.objc.moveWeak(i8** %[[B]], i8** %[[C]])
-// ARC-NEXT: tail call void @llvm.objc.release(i8* %[[A]])
-// ARC-NEXT: %[[V6:.*]] = tail call i8* @llvm.objc.retain(i8* %[[A]])
-// ARC-NEXT: %[[V7:.*]] = tail call i8* @llvm.objc.retainAutorelease(i8* %[[A]])
-// ARC-NEXT: %[[V8:.*]] = tail call i8* @llvm.objc.retainAutoreleaseReturnValue(i8* %[[A]])
-// ARC-NEXT: %[[V9:.*]] = tail call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* %[[A]])
-// ARC-NEXT: %[[V10:.*]] = tail call i8* @llvm.objc.retainBlock(i8* %[[A]])
-// ARC-NEXT: tail call void @llvm.objc.storeStrong(i8** %[[B]], i8* %[[A]])
-// ARC-NEXT: %[[V11:.*]] = tail call i8* @llvm.objc.storeWeak(i8** %[[B]], i8* %[[A]])
-// ARC-NEXT: tail call void (...) @llvm.objc.clang.arc.use(i8* %[[A]])
-// ARC-NEXT: %[[V12:.*]] = tail call i8* @llvm.objc.unsafeClaimAutoreleasedReturnValue(i8* %[[A]])
-// ARC-NEXT: %[[V13:.*]] = tail call i8* @llvm.objc.retainedObject(i8* %[[A]])
-// ARC-NEXT: %[[V14:.*]] = tail call i8* @llvm.objc.unretainedObject(i8* %[[A]])
-// ARC-NEXT: %[[V15:.*]] = tail call i8* @llvm.objc.unretainedPointer(i8* %[[A]])
-// ARC-NEXT: %[[V16:.*]] = tail call i8* @objc_retain.autorelease(i8* %[[A]])
-// ARC-NEXT: %[[V17:.*]] = tail call i32 @objc_sync.enter(i8* %[[A]])
-// ARC-NEXT: %[[V18:.*]] = tail call i32 @objc_sync.exit(i8* %[[A]])
-// ARC-NEXT: tail call void @llvm.objc.arc.annotation.topdown.bbstart(i8** %[[B]], i8** %[[C]])
-// ARC-NEXT: tail call void @llvm.objc.arc.annotation.topdown.bbend(i8** %[[B]], i8** %[[C]])
-// ARC-NEXT: tail call void @llvm.objc.arc.annotation.bottomup.bbstart(i8** %[[B]], i8** %[[C]])
-// ARC-NEXT: tail call void @llvm.objc.arc.annotation.bottomup.bbend(i8** %[[B]], i8** %[[C]])
-// ARC-NEXT: invoke void @objc_autoreleasePoolPop(i8* %[[A]])
+// ARC: define void @testRuntimeCalls(ptr %[[A:.*]], ptr %[[B:.*]], ptr %[[C:.*]], ptr %[[D:.*]], ptr %[[E:.*]]) personality
+// ARC: %[[V0:.*]] = tail call ptr @llvm.objc.autorelease(ptr %[[A]])
+// ARC-NEXT: tail call void @llvm.objc.autoreleasePoolPop(ptr %[[A]])
+// ARC-NEXT: %[[V1:.*]] = tail call ptr @llvm.objc.autoreleasePoolPush()
+// ARC-NEXT: %[[V2:.*]] = tail call ptr @llvm.objc.autoreleaseReturnValue(ptr %[[A]])
+// ARC-NEXT: tail call void @llvm.objc.copyWeak(ptr %[[B]], ptr %[[C]])
+// ARC-NEXT: tail call void @llvm.objc.destroyWeak(ptr %[[B]])
+// ARC-NEXT: %[[V102:.*]] = tail call ptr @llvm.objc.initWeak(ptr %[[E]], ptr %[[D]])
+// ARC-NEXT: %[[V4:.*]] = tail call ptr @llvm.objc.loadWeak(ptr %[[B]])
+// ARC-NEXT: %[[V5:.*]] = tail call ptr @llvm.objc.loadWeakRetained(ptr %[[B]])
+// ARC-NEXT: tail call void @llvm.objc.moveWeak(ptr %[[B]], ptr %[[C]])
+// ARC-NEXT: tail call void @llvm.objc.release(ptr %[[A]])
+// ARC-NEXT: %[[V6:.*]] = tail call ptr @llvm.objc.retain(ptr %[[A]])
+// ARC-NEXT: %[[V7:.*]] = tail call ptr @llvm.objc.retainAutorelease(ptr %[[A]])
+// ARC-NEXT: %[[V8:.*]] = tail call ptr @llvm.objc.retainAutoreleaseReturnValue(ptr %[[A]])
+// ARC-NEXT: %[[V9:.*]] = tail call ptr @llvm.objc.retainAutoreleasedReturnValue(ptr %[[A]])
+// ARC-NEXT: %[[V10:.*]] = tail call ptr @llvm.objc.retainBlock(ptr %[[A]])
+// ARC-NEXT: tail call void @llvm.objc.storeStrong(ptr %[[B]], ptr %[[A]])
+// ARC-NEXT: %[[V11:.*]] = tail call ptr @llvm.objc.storeWeak(ptr %[[B]], ptr %[[A]])
+// ARC-NEXT: tail call void (...) @llvm.objc.clang.arc.use(ptr %[[A]])
+// ARC-NEXT: %[[V12:.*]] = tail call ptr @llvm.objc.unsafeClaimAutoreleasedReturnValue(ptr %[[A]])
+// ARC-NEXT: %[[V13:.*]] = tail call ptr @llvm.objc.retainedObject(ptr %[[A]])
+// ARC-NEXT: %[[V14:.*]] = tail call ptr @llvm.objc.unretainedObject(ptr %[[A]])
+// ARC-NEXT: %[[V15:.*]] = tail call ptr @llvm.objc.unretainedPointer(ptr %[[A]])
+// ARC-NEXT: %[[V16:.*]] = tail call ptr @objc_retain.autorelease(ptr %[[A]])
+// ARC-NEXT: %[[V17:.*]] = tail call i32 @objc_sync.enter(ptr %[[A]])
+// ARC-NEXT: %[[V18:.*]] = tail call i32 @objc_sync.exit(ptr %[[A]])
+// ARC-NEXT: tail call void @llvm.objc.arc.annotation.topdown.bbstart(ptr %[[B]], ptr %[[C]])
+// ARC-NEXT: tail call void @llvm.objc.arc.annotation.topdown.bbend(ptr %[[B]], ptr %[[C]])
+// ARC-NEXT: tail call void @llvm.objc.arc.annotation.bottomup.bbstart(ptr %[[B]], ptr %[[C]])
+// ARC-NEXT: tail call void @llvm.objc.arc.annotation.bottomup.bbend(ptr %[[B]], ptr %[[C]])
+// ARC-NEXT: invoke void @objc_autoreleasePoolPop(ptr %[[A]])
 
-// NOUPGRADE: define void @testRuntimeCalls(i8* %[[A:.*]], i8** %[[B:.*]], i8** %[[C:.*]], i32* %[[D:.*]], i32** %[[E:.*]]) personality
-// NOUPGRADE: %[[V0:.*]] = tail call i8* @objc_autorelease(i8* %[[A]])
-// NOUPGRADE-NEXT: tail call void @objc_autoreleasePoolPop(i8* %[[A]])
-// NOUPGRADE-NEXT: %[[V1:.*]] = tail call i8* @objc_autoreleasePoolPush()
-// NOUPGRADE-NEXT: %[[V2:.*]] = tail call i8* @objc_autoreleaseReturnValue(i8* %[[A]])
-// NOUPGRADE-NEXT: tail call void @objc_copyWeak(i8** %[[B]], i8** %[[C]])
-// NOUPGRADE-NEXT: tail call void @objc_destroyWeak(i8** %[[B]])
-// NOUPGRADE-NEXT: %[[V3:.*]] = tail call i32* @objc_initWeak(i32** %[[E]], i32* %[[D]])
-// NOUPGRADE-NEXT: %[[V4:.*]] = tail call i8* @objc_loadWeak(i8** %[[B]])
-// NOUPGRADE-NEXT: %[[V5:.*]] = tail call i8* @objc_loadWeakRetained(i8** %[[B]])
-// NOUPGRADE-NEXT: tail call void @objc_moveWeak(i8** %[[B]], i8** %[[C]])
-// NOUPGRADE-NEXT: tail call void @objc_release(i8* %[[A]])
-// NOUPGRADE-NEXT: %[[V6:.*]] = tail call i8* @objc_retain(i8* %[[A]])
-// NOUPGRADE-NEXT: %[[V7:.*]] = tail call i8* @objc_retainAutorelease(i8* %[[A]])
-// NOUPGRADE-NEXT: %[[V8:.*]] = tail call i8* @objc_retainAutoreleaseReturnValue(i8* %[[A]])
-// NOUPGRADE-NEXT: %[[V9:.*]] = tail call i8* @objc_retainAutoreleasedReturnValue(i8* %[[A]])
-// NOUPGRADE-NEXT: %[[V10:.*]] = tail call i8* @objc_retainBlock(i8* %[[A]])
-// NOUPGRADE-NEXT: tail call void @objc_storeStrong(i8** %[[B]], i8* %[[A]])
-// NOUPGRADE-NEXT: %[[V11:.*]] = tail call i8* @objc_storeWeak(i8** %[[B]], i8* %[[A]])
-// NOUPGRADE-NEXT: tail call void (...) @llvm.objc.clang.arc.use(i8* %[[A]])
-// NOUPGRADE-NEXT: %[[V12:.*]] = tail call i8* @objc_unsafeClaimAutoreleasedReturnValue(i8* %[[A]])
-// NOUPGRADE-NEXT: %[[V13:.*]] = tail call i8* @objc_retainedObject(i8* %[[A]])
-// NOUPGRADE-NEXT: %[[V14:.*]] = tail call i8* @objc_unretainedObject(i8* %[[A]])
-// NOUPGRADE-NEXT: %[[V15:.*]] = tail call i8* @objc_unretainedPointer(i8* %[[A]])
-// NOUPGRADE-NEXT: %[[V16:.*]] = tail call i8* @objc_retain.autorelease(i8* %[[A]])
-// NOUPGRADE-NEXT: %[[V17:.*]] = tail call i32 @objc_sync.enter(i8* %[[A]])
-// NOUPGRADE-NEXT: %[[V18:.*]] = tail call i32 @objc_sync.exit(i8* %[[A]])
-// NOUPGRADE-NEXT: tail call void @objc_arc_annotation_topdown_bbstart(i8** %[[B]], i8** %[[C]])
-// NOUPGRADE-NEXT: tail call void @objc_arc_annotation_topdown_bbend(i8** %[[B]], i8** %[[C]])
-// NOUPGRADE-NEXT: tail call void @objc_arc_annotation_bottomup_bbstart(i8** %[[B]], i8** %[[C]])
-// NOUPGRADE-NEXT: tail call void @objc_arc_annotation_bottomup_bbend(i8** %[[B]], i8** %[[C]])
-// NOUPGRADE-NEXT: invoke void @objc_autoreleasePoolPop(i8* %[[A]])
+// NOUPGRADE: define void @testRuntimeCalls(ptr %[[A:.*]], ptr %[[B:.*]], ptr %[[C:.*]], ptr %[[D:.*]], ptr %[[E:.*]]) personality
+// NOUPGRADE: %[[V0:.*]] = tail call ptr @objc_autorelease(ptr %[[A]])
+// NOUPGRADE-NEXT: tail call void @objc_autoreleasePoolPop(ptr %[[A]])
+// NOUPGRADE-NEXT: %[[V1:.*]] = tail call ptr @objc_autoreleasePoolPush()
+// NOUPGRADE-NEXT: %[[V2:.*]] = tail call ptr @objc_autoreleaseReturnValue(ptr %[[A]])
+// NOUPGRADE-NEXT: tail call void @objc_copyWeak(ptr %[[B]], ptr %[[C]])
+// NOUPGRADE-NEXT: tail call void @objc_destroyWeak(ptr %[[B]])
+// NOUPGRADE-NEXT: %[[V3:.*]] = tail call ptr @objc_initWeak(ptr %[[E]], ptr %[[D]])
+// NOUPGRADE-NEXT: %[[V4:.*]] = tail call ptr @objc_loadWeak(ptr %[[B]])
+// NOUPGRADE-NEXT: %[[V5:.*]] = tail call ptr @objc_loadWeakRetained(ptr %[[B]])
+// NOUPGRADE-NEXT: tail call void @objc_moveWeak(ptr %[[B]], ptr %[[C]])
+// NOUPGRADE-NEXT: tail call void @objc_release(ptr %[[A]])
+// NOUPGRADE-NEXT: %[[V6:.*]] = tail call ptr @objc_retain(ptr %[[A]])
+// NOUPGRADE-NEXT: %[[V7:.*]] = tail call ptr @objc_retainAutorelease(ptr %[[A]])
+// NOUPGRADE-NEXT: %[[V8:.*]] = tail call ptr @objc_retainAutoreleaseReturnValue(ptr %[[A]])
+// NOUPGRADE-NEXT: %[[V9:.*]] = tail call ptr @objc_retainAutoreleasedReturnValue(ptr %[[A]])
+// NOUPGRADE-NEXT: %[[V10:.*]] = tail call ptr @objc_retainBlock(ptr %[[A]])
+// NOUPGRADE-NEXT: tail call void @objc_storeStrong(ptr %[[B]], ptr %[[A]])
+// NOUPGRADE-NEXT: %[[V11:.*]] = tail call ptr @objc_storeWeak(ptr %[[B]], ptr %[[A]])
+// NOUPGRADE-NEXT: tail call void (...) @llvm.objc.clang.arc.use(ptr %[[A]])
+// NOUPGRADE-NEXT: %[[V12:.*]] = tail call ptr @objc_unsafeClaimAutoreleasedReturnValue(ptr %[[A]])
+// NOUPGRADE-NEXT: %[[V13:.*]] = tail call ptr @objc_retainedObject(ptr %[[A]])
+// NOUPGRADE-NEXT: %[[V14:.*]] = tail call ptr @objc_unretainedObject(ptr %[[A]])
+// NOUPGRADE-NEXT: %[[V15:.*]] = tail call ptr @objc_unretainedPointer(ptr %[[A]])
+// NOUPGRADE-NEXT: %[[V16:.*]] = tail call ptr @objc_retain.autorelease(ptr %[[A]])
+// NOUPGRADE-NEXT: %[[V17:.*]] = tail call i32 @objc_sync.enter(ptr %[[A]])
+// NOUPGRADE-NEXT: %[[V18:.*]] = tail call i32 @objc_sync.exit(ptr %[[A]])
+// NOUPGRADE-NEXT: tail call void @objc_arc_annotation_topdown_bbstart(ptr %[[B]], ptr %[[C]])
+// NOUPGRADE-NEXT: tail call void @objc_arc_annotation_topdown_bbend(ptr %[[B]], ptr %[[C]])
+// NOUPGRADE-NEXT: tail call void @objc_arc_annotation_bottomup_bbstart(ptr %[[B]], ptr %[[C]])
+// NOUPGRADE-NEXT: tail call void @objc_arc_annotation_bottomup_bbend(ptr %[[B]], ptr %[[C]])
+// NOUPGRADE-NEXT: invoke void @objc_autoreleasePoolPop(ptr %[[A]])

diff  --git a/llvm/test/Bitcode/upgrade-elementtype.ll b/llvm/test/Bitcode/upgrade-elementtype.ll
index 16c360593882d..aa3a8bba065de 100644
--- a/llvm/test/Bitcode/upgrade-elementtype.ll
+++ b/llvm/test/Bitcode/upgrade-elementtype.ll
@@ -4,8 +4,8 @@
 
 define void @test(%struct.s* %arg) {
 ; CHECK-LABEL: define void @test
-; CHECK: %x = call %struct.s* @llvm.preserve.array.access.index.p0s_struct.ss.p0s_struct.ss(%struct.s* elementtype(%struct.s) %arg, i32 0, i32 2)
-; CHECK: %1 = call i32* @llvm.preserve.struct.access.index.p0i32.p0s_struct.ss(%struct.s* elementtype(%struct.s) %x, i32 1, i32 1)
+; CHECK: %x = call ptr @llvm.preserve.array.access.index.p0.p0(ptr elementtype(%struct.s) %arg, i32 0, i32 2)
+; CHECK: %1 = call ptr @llvm.preserve.struct.access.index.p0.p0(ptr elementtype(%struct.s) %x, i32 1, i32 1)
   %x = call %struct.s* @llvm.preserve.array.access.index.p0s_struct.ss.p0s_struct.ss(%struct.s* %arg, i32 0, i32 2)
   call i32* @llvm.preserve.struct.access.index.p0i32.p0s_struct.ss(%struct.s* %x, i32 1, i32 1)
   ret void

diff --git a/llvm/test/Bitcode/upgrade-global-ctors.ll b/llvm/test/Bitcode/upgrade-global-ctors.ll
index 372b464649839..7c3776da41f56 100644
--- a/llvm/test/Bitcode/upgrade-global-ctors.ll
+++ b/llvm/test/Bitcode/upgrade-global-ctors.ll
@@ -2,4 +2,4 @@
 ; RUN:  verify-uselistorder < %s.bc
 
 ; The 2-field form @llvm.global_ctors will be upgraded when reading bitcode.
-; CHECK: @llvm.global_ctors = appending global [0 x { i32, void ()*, i8* }] zeroinitializer
+; CHECK: @llvm.global_ctors = appending global [0 x { i32, ptr, ptr }] zeroinitializer

diff --git a/llvm/test/Bitcode/upgrade-global-dtors.ll b/llvm/test/Bitcode/upgrade-global-dtors.ll
index a0879bd1b8b03..5b5741de1f470 100644
--- a/llvm/test/Bitcode/upgrade-global-dtors.ll
+++ b/llvm/test/Bitcode/upgrade-global-dtors.ll
@@ -2,4 +2,4 @@
 ; RUN: verify-uselistorder < %s.bc
 
 ; The 2-field form @llvm.global_dtors will be upgraded when reading bitcode.
-; CHECK: @llvm.global_dtors = appending global [2 x { i32, void ()*, i8* }] [{ i32, void ()*, i8* } { i32 65535, void ()* null, i8* null }, { i32, void ()*, i8* } { i32 65534, void ()* null, i8* null }]
+; CHECK: @llvm.global_dtors = appending global [2 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 65535, ptr null, ptr null }, { i32, ptr, ptr } { i32 65534, ptr null, ptr null }]

diff --git a/llvm/test/Bitcode/upgrade-inline-asm-elementtype.ll b/llvm/test/Bitcode/upgrade-inline-asm-elementtype.ll
index 5658103fbc80c..f12a54fe62287 100644
--- a/llvm/test/Bitcode/upgrade-inline-asm-elementtype.ll
+++ b/llvm/test/Bitcode/upgrade-inline-asm-elementtype.ll
@@ -1,12 +1,12 @@
 ; RUN: llvm-dis < %s.bc | FileCheck %s
 
-; CHECK: call void asm "", "=*rm,r"(i32* elementtype(i32) %p1, i32* %p2)
+; CHECK: call void asm "", "=*rm,r"(ptr elementtype(i32) %p1, ptr %p2)
 define void @test_call(i32* %p1, i32* %p2) {
 	call void asm "", "=*rm,r"(i32* %p1, i32* %p2)
   ret void
 }
 
-; CHECK: invoke void asm "", "=*rm,r"(i32* elementtype(i32) %p1, i32* %p2)
+; CHECK: invoke void asm "", "=*rm,r"(ptr elementtype(i32) %p1, ptr %p2)
 define void @test_invoke(i32* %p1, i32* %p2) personality i8* null {
 	invoke void asm "", "=*rm,r"(i32* %p1, i32* %p2)
       to label %cont unwind label %lpad
@@ -20,7 +20,7 @@ cont:
   ret void
 }
 
-; CHECK: callbr void asm "", "=*rm,r"(i32* elementtype(i32) %p1, i32* %p2)
+; CHECK: callbr void asm "", "=*rm,r"(ptr elementtype(i32) %p1, ptr %p2)
 define void @test_callbr(i32* %p1, i32* %p2) {
 	callbr void asm "", "=*rm,r"(i32* %p1, i32* %p2)
       to label %cont []

diff --git a/llvm/test/Bitcode/upgrade-ptr-annotation.ll b/llvm/test/Bitcode/upgrade-ptr-annotation.ll
index 76f80416b5ca5..8ce3213279d68 100644
--- a/llvm/test/Bitcode/upgrade-ptr-annotation.ll
+++ b/llvm/test/Bitcode/upgrade-ptr-annotation.ll
@@ -8,19 +8,19 @@
 ; the function, but that makes it easier to test that they are handled
 ; correctly.
 define void @f1(i8* %arg0, i8* %arg1, i8* %arg2, i32 %arg3) {
-;CHECK: @f1(i8* [[ARG0:%.*]], i8* [[ARG1:%.*]], i8* [[ARG2:%.*]], i32 [[ARG3:%.*]])
+;CHECK: @f1(ptr [[ARG0:%.*]], ptr [[ARG1:%.*]], ptr [[ARG2:%.*]], i32 [[ARG3:%.*]])
   %t0 = call i8* @llvm.ptr.annotation.p0i8(i8* %arg0, i8* %arg1, i8* %arg2, i32 %arg3)
-;CHECK:  call i8* @llvm.ptr.annotation.p0i8.p0i8(i8* [[ARG0]], i8* [[ARG1]], i8* [[ARG2]], i32 [[ARG3]], i8* null)
+;CHECK:  call ptr @llvm.ptr.annotation.p0.p0(ptr [[ARG0]], ptr [[ARG1]], ptr [[ARG2]], i32 [[ARG3]], ptr null)
 
   %arg0_p16 = bitcast i8* %arg0 to i16*
   %t1 = call i16* @llvm.ptr.annotation.p0i16(i16* %arg0_p16, i8* %arg1, i8* %arg2, i32 %arg3)
 ;CHECK:  [[ARG0_P16:%.*]] = bitcast
-;CHECK:  call i16* @llvm.ptr.annotation.p0i16.p0i8(i16* [[ARG0_P16]], i8* [[ARG1]], i8* [[ARG2]], i32 [[ARG3]], i8* null)
+;CHECK:  call ptr @llvm.ptr.annotation.p0.p0(ptr [[ARG0_P16]], ptr [[ARG1]], ptr [[ARG2]], i32 [[ARG3]], ptr null)
 
   %arg0_p256 = bitcast i8* %arg0 to i256*
   %t2 = call i256* @llvm.ptr.annotation.p0i256(i256* %arg0_p256, i8* %arg1, i8* %arg2, i32 %arg3)
 ;CHECK:  [[ARG0_P256:%.*]] = bitcast
-;CHECK:  call i256* @llvm.ptr.annotation.p0i256.p0i8(i256* [[ARG0_P256]], i8* [[ARG1]], i8* [[ARG2]], i32 [[ARG3]], i8* null)
+;CHECK:  call ptr @llvm.ptr.annotation.p0.p0(ptr [[ARG0_P256]], ptr [[ARG1]], ptr [[ARG2]], i32 [[ARG3]], ptr null)
   ret void
 }
 
@@ -31,16 +31,14 @@ define i16* @f2(i16* %x, i16* %y) {
   %cmp = icmp ugt i16* %t0, %t1
   %sel = select i1 %cmp, i16* %t0, i16* %t1
   ret i16* %sel
-; CHECK:  [[T0:%.*]] = call i16* @llvm.ptr.annotation.p0i16.p0i8(i16* %x, i8* undef, i8* undef, i32 undef, i8* null)
-; CHECK:  [[T1:%.*]] = call i16* @llvm.ptr.annotation.p0i16.p0i8(i16* %y, i8* undef, i8* undef, i32 undef, i8* null)
-; CHECK:  %cmp = icmp ugt i16* [[T0]], [[T1]]
-; CHECK:  %sel = select i1 %cmp, i16* [[T0]], i16* [[T1]]
-; CHECK:  ret i16* %sel
+; CHECK:  [[T0:%.*]] = call ptr @llvm.ptr.annotation.p0.p0(ptr %x, ptr undef, ptr undef, i32 undef, ptr null)
+; CHECK:  [[T1:%.*]] = call ptr @llvm.ptr.annotation.p0.p0(ptr %y, ptr undef, ptr undef, i32 undef, ptr null)
+; CHECK:  %cmp = icmp ugt ptr [[T0]], [[T1]]
+; CHECK:  %sel = select i1 %cmp, ptr [[T0]], ptr [[T1]]
+; CHECK:  ret ptr %sel
 }
 
+; CHECK: declare ptr   @llvm.ptr.annotation.p0.p0(ptr, ptr, ptr, i32, ptr)
 declare i8*   @llvm.ptr.annotation.p0i8(i8*, i8*, i8*, i32)
-; CHECK: declare i8*   @llvm.ptr.annotation.p0i8.p0i8(i8*, i8*, i8*, i32, i8*)
 declare i16*  @llvm.ptr.annotation.p0i16(i16*, i8*, i8*, i32)
-; CHECK: declare i16*   @llvm.ptr.annotation.p0i16.p0i8(i16*, i8*, i8*, i32, i8*)
 declare i256* @llvm.ptr.annotation.p0i256(i256*, i8*, i8*, i32)
-; CHECK: declare i256*   @llvm.ptr.annotation.p0i256.p0i8(i256*, i8*, i8*, i32, i8*)

diff --git a/llvm/test/Bitcode/upgrade-tbaa.ll b/llvm/test/Bitcode/upgrade-tbaa.ll
index 7abfff868784f..7a70d9993df60 100644
--- a/llvm/test/Bitcode/upgrade-tbaa.ll
+++ b/llvm/test/Bitcode/upgrade-tbaa.ll
@@ -5,9 +5,9 @@
 define void @_Z4testPiPf(i32* nocapture %pI, float* nocapture %pF) #0 {
 entry:
   store i32 0, i32* %pI, align 4, !tbaa !{!"int", !0}
-  ; CHECK: store i32 0, i32* %pI, align 4, !tbaa [[TAG_INT:!.*]]
+  ; CHECK: store i32 0, ptr %pI, align 4, !tbaa [[TAG_INT:!.*]]
   store float 1.000000e+00, float* %pF, align 4, !tbaa !2
-  ; CHECK: store float 1.000000e+00, float* %pF, align 4, !tbaa [[TAG_FLOAT:!.*]]
+  ; CHECK: store float 1.000000e+00, ptr %pF, align 4, !tbaa [[TAG_FLOAT:!.*]]
   ret void
 }
 

diff --git a/llvm/test/Bitcode/upgrade-var-annotation.ll b/llvm/test/Bitcode/upgrade-var-annotation.ll
index c1a0e706f4a29..2248b304fd6a5 100644
--- a/llvm/test/Bitcode/upgrade-var-annotation.ll
+++ b/llvm/test/Bitcode/upgrade-var-annotation.ll
@@ -5,12 +5,12 @@
 
 
 define void @f(i8* %arg0, i8* %arg1, i8* %arg2, i32 %arg3) {
-;CHECK: @f(i8* [[ARG0:%.*]], i8* [[ARG1:%.*]], i8* [[ARG2:%.*]], i32 [[ARG3:%.*]])
+;CHECK: @f(ptr [[ARG0:%.*]], ptr [[ARG1:%.*]], ptr [[ARG2:%.*]], i32 [[ARG3:%.*]])
   call void @llvm.var.annotation(i8* %arg0, i8* %arg1, i8* %arg2, i32 %arg3)
-;CHECK:  call void @llvm.var.annotation.p0i8.p0i8(i8* [[ARG0]], i8* [[ARG1]], i8* [[ARG2]], i32 [[ARG3]], i8* null)
+;CHECK:  call void @llvm.var.annotation.p0.p0(ptr [[ARG0]], ptr [[ARG1]], ptr [[ARG2]], i32 [[ARG3]], ptr null)
   ret void
 }
 
 ; Function Attrs: nofree nosync nounwind willreturn
 declare void @llvm.var.annotation(i8*, i8*, i8*, i32)
-; CHECK: declare void @llvm.var.annotation.p0i8.p0i8(i8*, i8*, i8*, i32, i8*)
+; CHECK: declare void @llvm.var.annotation.p0.p0(ptr, ptr, ptr, i32, ptr)

diff --git a/llvm/test/Bitcode/variableArgumentIntrinsic.3.2.ll b/llvm/test/Bitcode/variableArgumentIntrinsic.3.2.ll
index ad70f0542e884..fad7b8ea6a58b 100644
--- a/llvm/test/Bitcode/variableArgumentIntrinsic.3.2.ll
+++ b/llvm/test/Bitcode/variableArgumentIntrinsic.3.2.ll
@@ -10,21 +10,21 @@ define i32 @varArgIntrinsic(i32 %X, ...) {
   %ap = alloca i8*
   %ap2 = bitcast i8** %ap to i8*
 
-; CHECK: call void @llvm.va_start(i8* %ap2)
+; CHECK: call void @llvm.va_start(ptr %ap2)
   call void @llvm.va_start(i8* %ap2)
 
-; CHECK-NEXT: %tmp = va_arg i8** %ap, i32
+; CHECK-NEXT: %tmp = va_arg ptr %ap, i32
   %tmp = va_arg i8** %ap, i32
 
   %aq = alloca i8*
   %aq2 = bitcast i8** %aq to i8*
 
-; CHECK: call void @llvm.va_copy(i8* %aq2, i8* %ap2)
+; CHECK: call void @llvm.va_copy(ptr %aq2, ptr %ap2)
   call void @llvm.va_copy(i8* %aq2, i8* %ap2)
-; CHECK-NEXT: call void @llvm.va_end(i8* %aq2)
+; CHECK-NEXT: call void @llvm.va_end(ptr %aq2)
   call void @llvm.va_end(i8* %aq2)
 
-; CHECK-NEXT:  call void @llvm.va_end(i8* %ap2)
+; CHECK-NEXT:  call void @llvm.va_end(ptr %ap2)
   call void @llvm.va_end(i8* %ap2)
   ret i32 %tmp
 }

diff --git a/llvm/test/Bitcode/weak-cmpxchg-upgrade.ll b/llvm/test/Bitcode/weak-cmpxchg-upgrade.ll
index 76b857b5e2217..9e75fb93dc50e 100644
--- a/llvm/test/Bitcode/weak-cmpxchg-upgrade.ll
+++ b/llvm/test/Bitcode/weak-cmpxchg-upgrade.ll
@@ -5,7 +5,7 @@
 ; before the IR change on this file.
 
 define i32 @test(i32* %addr, i32 %old, i32 %new) {
-; CHECK:  [[TMP:%.*]] = cmpxchg i32* %addr, i32 %old, i32 %new seq_cst monotonic
+; CHECK:  [[TMP:%.*]] = cmpxchg ptr %addr, i32 %old, i32 %new seq_cst monotonic
 ; CHECK:  %val = extractvalue { i32, i1 } [[TMP]], 0
   %val = cmpxchg i32* %addr, i32 %old, i32 %new seq_cst monotonic
   ret i32 %val


        


More information about the llvm-commits mailing list