[llvm] 46584de - AArch64/GlobalISel: Convert tests to opaque pointers

Matt Arsenault via llvm-commits llvm-commits at lists.llvm.org
Fri Dec 2 13:19:54 PST 2022


Author: Matt Arsenault
Date: 2022-12-02T16:19:38-05:00
New Revision: 46584de02c1a38a0ccde85cb5c16331380966c36

URL: https://github.com/llvm/llvm-project/commit/46584de02c1a38a0ccde85cb5c16331380966c36
DIFF: https://github.com/llvm/llvm-project/commit/46584de02c1a38a0ccde85cb5c16331380966c36.diff

LOG: AArch64/GlobalISel: Convert tests to opaque pointers

inttoptr_add.ll had a mangled bitcast constantexpr.
translate-gep.ll: restored a 0 GEP.
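
For context, the conversion is mechanical: each typed pointer becomes the
opaque ptr type, and the pointee type moves onto the memory operation
itself. A minimal sketch of the pattern (hypothetical function, not taken
from any of the tests below):

    ; before: typed pointers
    define i32 @load_example(i32* %p) {
      %q = getelementptr i32, i32* %p, i64 1
      %v = load i32, i32* %q
      ret i32 %v
    }

    ; after: opaque pointers
    define i32 @load_example(ptr %p) {
      %q = getelementptr i32, ptr %p, i64 1 ; element type stays on the GEP
      %v = load i32, ptr %q                 ; pointee type stays on the load
      ret i32 %v
    }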

Added: 
    

Modified: 
    llvm/test/CodeGen/AArch64/GlobalISel/arm64-atomic-128.ll
    llvm/test/CodeGen/AArch64/GlobalISel/arm64-atomic.ll
    llvm/test/CodeGen/AArch64/GlobalISel/arm64-callingconv.ll
    llvm/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll
    llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator-gep.ll
    llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator-stackprotect.ll
    llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator-switch.ll
    llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll
    llvm/test/CodeGen/AArch64/GlobalISel/arm64-pcsections.ll
    llvm/test/CodeGen/AArch64/GlobalISel/assert-align.ll
    llvm/test/CodeGen/AArch64/GlobalISel/builtin-return-address-pacret.ll
    llvm/test/CodeGen/AArch64/GlobalISel/byval-call.ll
    llvm/test/CodeGen/AArch64/GlobalISel/call-lowering-const-bitcast-func.ll
    llvm/test/CodeGen/AArch64/GlobalISel/call-translator-cse.ll
    llvm/test/CodeGen/AArch64/GlobalISel/call-translator-ios.ll
    llvm/test/CodeGen/AArch64/GlobalISel/call-translator-tail-call-sret.ll
    llvm/test/CodeGen/AArch64/GlobalISel/call-translator-tail-call.ll
    llvm/test/CodeGen/AArch64/GlobalISel/call-translator-variadic-musttail.ll
    llvm/test/CodeGen/AArch64/GlobalISel/call-translator.ll
    llvm/test/CodeGen/AArch64/GlobalISel/combine-shift-of-shifted-dbg-value-fallback.ll
    llvm/test/CodeGen/AArch64/GlobalISel/combiner-load-store-indexing.ll
    llvm/test/CodeGen/AArch64/GlobalISel/constant-dbg-loc.ll
    llvm/test/CodeGen/AArch64/GlobalISel/darwin-tls-call-clobber.ll
    llvm/test/CodeGen/AArch64/GlobalISel/debug-cpp.ll
    llvm/test/CodeGen/AArch64/GlobalISel/debug-insts.ll
    llvm/test/CodeGen/AArch64/GlobalISel/dynamic-alloca-lifetime.ll
    llvm/test/CodeGen/AArch64/GlobalISel/dynamic-alloca.ll
    llvm/test/CodeGen/AArch64/GlobalISel/freeze.ll
    llvm/test/CodeGen/AArch64/GlobalISel/gisel-fail-intermediate-legalizer.ll
    llvm/test/CodeGen/AArch64/GlobalISel/inttoptr_add.ll
    llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-atomic-metadata.ll
    llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-block-order.ll
    llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-delayed-stack-protector.ll
    llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-dilocation.ll
    llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-exceptions.ll
    llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-extract-used-by-dbg.ll
    llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-indirect-br-repeated-block.ll
    llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-inline-asm.ll
    llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-invoke-probabilities.ll
    llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-load-metadata.ll
    llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-localescape.ll
    llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-max-address-space.ll
    llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-memcpy-inline.ll
    llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-memfunc-undef.ll
    llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-no-op-intrinsics.ll
    llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-no-unwind-inline-asm.ll
    llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-one-by-n-vector-ptr-add.ll
    llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-stack-objects.ll
    llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-stack-protector-windows.ll
    llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-store-metadata.ll
    llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-switch-bittest.ll
    llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-tbaa.ll
    llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-unwind-inline-asm.ll
    llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-volatile-load-pr36018.ll
    llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-weird-alloca-size.ll
    llvm/test/CodeGen/AArch64/GlobalISel/legalize-exceptions.ll
    llvm/test/CodeGen/AArch64/GlobalISel/localizer-arm64-tti.ll
    llvm/test/CodeGen/AArch64/GlobalISel/memcpy_chk_no_tail.ll
    llvm/test/CodeGen/AArch64/GlobalISel/merge-stores-truncating.ll
    llvm/test/CodeGen/AArch64/GlobalISel/no-neon-no-fp.ll
    llvm/test/CodeGen/AArch64/GlobalISel/ret-1x-vec.ll
    llvm/test/CodeGen/AArch64/GlobalISel/ret-vec-promote.ll
    llvm/test/CodeGen/AArch64/GlobalISel/select-bitfield-insert.ll
    llvm/test/CodeGen/AArch64/GlobalISel/select-frameaddr.ll
    llvm/test/CodeGen/AArch64/GlobalISel/select-returnaddr.ll
    llvm/test/CodeGen/AArch64/GlobalISel/store-merging.ll
    llvm/test/CodeGen/AArch64/GlobalISel/swifterror.ll
    llvm/test/CodeGen/AArch64/GlobalISel/swiftself.ll
    llvm/test/CodeGen/AArch64/GlobalISel/translate-constant-dag.ll
    llvm/test/CodeGen/AArch64/GlobalISel/translate-gep.ll
    llvm/test/CodeGen/AArch64/GlobalISel/translate-ret.ll
    llvm/test/CodeGen/AArch64/GlobalISel/unknown-intrinsic.ll
    llvm/test/CodeGen/AArch64/GlobalISel/unwind-inline-asm.ll
    llvm/test/CodeGen/AArch64/GlobalISel/v8.4-atomic-128.ll
    llvm/test/CodeGen/AArch64/GlobalISel/varargs-ios-translator.ll
    llvm/test/CodeGen/AArch64/GlobalISel/vastart.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-atomic-128.ll b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-atomic-128.ll
index 556c9ece831a..dec4318f57a0 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-atomic-128.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-atomic-128.ll
@@ -5,7 +5,7 @@
 ; RUN: llc < %s -mtriple=arm64-linux-gnu -verify-machineinstrs -O0 -mattr=+lse -global-isel -global-isel-abort=1 | FileCheck %s --check-prefix=CHECK-CAS-O0
 @var = global i128 0
 
-define void @val_compare_and_swap(i128* %p, i128 %oldval, i128 %newval) {
+define void @val_compare_and_swap(ptr %p, i128 %oldval, i128 %newval) {
 ; CHECK-LLSC-O1-LABEL: val_compare_and_swap:
 ; CHECK-LLSC-O1:       // %bb.0:
 ; CHECK-LLSC-O1-NEXT:  .LBB0_1: // =>This Inner Loop Header: Depth=1
@@ -84,13 +84,13 @@ define void @val_compare_and_swap(i128* %p, i128 %oldval, i128 %newval) {
 ; CHECK-CAS-O0-NEXT:    add sp, sp, #16
 ; CHECK-CAS-O0-NEXT:    ret
 
-%pair = cmpxchg i128* %p, i128 %oldval, i128 %newval acquire acquire
+%pair = cmpxchg ptr %p, i128 %oldval, i128 %newval acquire acquire
   %val = extractvalue { i128, i1 } %pair, 0
-  store i128 %val, i128* %p
+  store i128 %val, ptr %p
   ret void
 }
 
-define void @val_compare_and_swap_monotonic_seqcst(i128* %p, i128 %oldval, i128 %newval) {
+define void @val_compare_and_swap_monotonic_seqcst(ptr %p, i128 %oldval, i128 %newval) {
 ; CHECK-LLSC-O1-LABEL: val_compare_and_swap_monotonic_seqcst:
 ; CHECK-LLSC-O1:       // %bb.0:
 ; CHECK-LLSC-O1-NEXT:  .LBB1_1: // =>This Inner Loop Header: Depth=1
@@ -169,13 +169,13 @@ define void @val_compare_and_swap_monotonic_seqcst(i128* %p, i128 %oldval, i128
 ; CHECK-CAS-O0-NEXT:    add sp, sp, #16
 ; CHECK-CAS-O0-NEXT:    ret
 
-  %pair = cmpxchg i128* %p, i128 %oldval, i128 %newval monotonic seq_cst
+  %pair = cmpxchg ptr %p, i128 %oldval, i128 %newval monotonic seq_cst
   %val = extractvalue { i128, i1 } %pair, 0
-  store i128 %val, i128* %p
+  store i128 %val, ptr %p
   ret void
 }
 
-define void @val_compare_and_swap_release_acquire(i128* %p, i128 %oldval, i128 %newval) {
+define void @val_compare_and_swap_release_acquire(ptr %p, i128 %oldval, i128 %newval) {
 ; CHECK-LLSC-O1-LABEL: val_compare_and_swap_release_acquire:
 ; CHECK-LLSC-O1:       // %bb.0:
 ; CHECK-LLSC-O1-NEXT:  .LBB2_1: // =>This Inner Loop Header: Depth=1
@@ -254,13 +254,13 @@ define void @val_compare_and_swap_release_acquire(i128* %p, i128 %oldval, i128 %
 ; CHECK-CAS-O0-NEXT:    add sp, sp, #16
 ; CHECK-CAS-O0-NEXT:    ret
 
-  %pair = cmpxchg i128* %p, i128 %oldval, i128 %newval release acquire
+  %pair = cmpxchg ptr %p, i128 %oldval, i128 %newval release acquire
   %val = extractvalue { i128, i1 } %pair, 0
-  store i128 %val, i128* %p
+  store i128 %val, ptr %p
   ret void
 }
 
-define void @val_compare_and_swap_monotonic(i128* %p, i128 %oldval, i128 %newval) {
+define void @val_compare_and_swap_monotonic(ptr %p, i128 %oldval, i128 %newval) {
 ; CHECK-LLSC-O1-LABEL: val_compare_and_swap_monotonic:
 ; CHECK-LLSC-O1:       // %bb.0:
 ; CHECK-LLSC-O1-NEXT:  .LBB3_1: // =>This Inner Loop Header: Depth=1
@@ -338,13 +338,13 @@ define void @val_compare_and_swap_monotonic(i128* %p, i128 %oldval, i128 %newval
 ; CHECK-CAS-O0-NEXT:    str q0, [x0]
 ; CHECK-CAS-O0-NEXT:    add sp, sp, #16
 ; CHECK-CAS-O0-NEXT:    ret
-  %pair = cmpxchg i128* %p, i128 %oldval, i128 %newval release acquire
+  %pair = cmpxchg ptr %p, i128 %oldval, i128 %newval release acquire
   %val = extractvalue { i128, i1 } %pair, 0
-  store i128 %val, i128* %p
+  store i128 %val, ptr %p
   ret void
 }
 
-define void @atomic_load_relaxed(i64, i64, i128* %p, i128* %p2) {
+define void @atomic_load_relaxed(i64, i64, ptr %p, ptr %p2) {
 ; CHECK-LLSC-O1-LABEL: atomic_load_relaxed:
 ; CHECK-LLSC-O1:       // %bb.0:
 ; CHECK-LLSC-O1-NEXT:  .LBB4_1: // %atomicrmw.start
@@ -411,12 +411,12 @@ define void @atomic_load_relaxed(i64, i64, i128* %p, i128* %p2) {
 ; CHECK-CAS-O0-NEXT:    str q0, [x3]
 ; CHECK-CAS-O0-NEXT:    ret
 
-    %r = load atomic i128, i128* %p monotonic, align 16
-    store i128 %r, i128* %p2
+    %r = load atomic i128, ptr %p monotonic, align 16
+    store i128 %r, ptr %p2
     ret void
 }
 
-define i128 @val_compare_and_swap_return(i128* %p, i128 %oldval, i128 %newval) {
+define i128 @val_compare_and_swap_return(ptr %p, i128 %oldval, i128 %newval) {
 ; CHECK-LLSC-O1-LABEL: val_compare_and_swap_return:
 ; CHECK-LLSC-O1:       // %bb.0:
 ; CHECK-LLSC-O1-NEXT:  .LBB5_1: // =>This Inner Loop Header: Depth=1
@@ -482,7 +482,7 @@ define i128 @val_compare_and_swap_return(i128* %p, i128 %oldval, i128 %newval) {
 ; CHECK-CAS-O0-NEXT:    mov x1, x3
 ; CHECK-CAS-O0-NEXT:    ret
 
-  %pair = cmpxchg i128* %p, i128 %oldval, i128 %newval acquire acquire
+  %pair = cmpxchg ptr %p, i128 %oldval, i128 %newval acquire acquire
   %val = extractvalue { i128, i1 } %pair, 0
   ret i128 %val
 }

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-atomic.ll b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-atomic.ll
index 058738c1ba25..a8d61acab183 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-atomic.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-atomic.ll
@@ -4,7 +4,7 @@
 ; RUN: llc < %s -mtriple=arm64-apple-ios -global-isel -global-isel-abort=1 -mcpu=apple-a13 -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK-LSE-O1
 ; RUN: llc < %s -mtriple=arm64-apple-ios -global-isel -global-isel-abort=1 -mcpu=apple-a13 -O0 -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK-LSE-O0
 
-define i32 @val_compare_and_swap(i32* %p, i32 %cmp, i32 %new) #0 {
+define i32 @val_compare_and_swap(ptr %p, i32 %cmp, i32 %new) #0 {
 ; CHECK-NOLSE-O1-LABEL: val_compare_and_swap:
 ; CHECK-NOLSE-O1:       ; %bb.0:
 ; CHECK-NOLSE-O1-NEXT:  LBB0_1: ; %cmpxchg.start
@@ -49,12 +49,12 @@ define i32 @val_compare_and_swap(i32* %p, i32 %cmp, i32 %new) #0 {
 ; CHECK-LSE-O0-NEXT:    mov x0, x1
 ; CHECK-LSE-O0-NEXT:    casa w0, w2, [x8]
 ; CHECK-LSE-O0-NEXT:    ret
-  %pair = cmpxchg i32* %p, i32 %cmp, i32 %new acquire acquire
+  %pair = cmpxchg ptr %p, i32 %cmp, i32 %new acquire acquire
   %val = extractvalue { i32, i1 } %pair, 0
   ret i32 %val
 }
 
-define i32 @val_compare_and_swap_from_load(i32* %p, i32 %cmp, i32* %pnew) #0 {
+define i32 @val_compare_and_swap_from_load(ptr %p, i32 %cmp, ptr %pnew) #0 {
 ; CHECK-NOLSE-O1-LABEL: val_compare_and_swap_from_load:
 ; CHECK-NOLSE-O1:       ; %bb.0:
 ; CHECK-NOLSE-O1-NEXT:    ldr w9, [x2]
@@ -103,13 +103,13 @@ define i32 @val_compare_and_swap_from_load(i32* %p, i32 %cmp, i32* %pnew) #0 {
 ; CHECK-LSE-O0-NEXT:    ldr w8, [x2]
 ; CHECK-LSE-O0-NEXT:    casa w0, w8, [x9]
 ; CHECK-LSE-O0-NEXT:    ret
-  %new = load i32, i32* %pnew
-  %pair = cmpxchg i32* %p, i32 %cmp, i32 %new acquire acquire
+  %new = load i32, ptr %pnew
+  %pair = cmpxchg ptr %p, i32 %cmp, i32 %new acquire acquire
   %val = extractvalue { i32, i1 } %pair, 0
   ret i32 %val
 }
 
-define i32 @val_compare_and_swap_rel(i32* %p, i32 %cmp, i32 %new) #0 {
+define i32 @val_compare_and_swap_rel(ptr %p, i32 %cmp, i32 %new) #0 {
 ; CHECK-NOLSE-O1-LABEL: val_compare_and_swap_rel:
 ; CHECK-NOLSE-O1:       ; %bb.0:
 ; CHECK-NOLSE-O1-NEXT:  LBB2_1: ; %cmpxchg.start
@@ -154,12 +154,12 @@ define i32 @val_compare_and_swap_rel(i32* %p, i32 %cmp, i32 %new) #0 {
 ; CHECK-LSE-O0-NEXT:    mov x0, x1
 ; CHECK-LSE-O0-NEXT:    casal w0, w2, [x8]
 ; CHECK-LSE-O0-NEXT:    ret
-  %pair = cmpxchg i32* %p, i32 %cmp, i32 %new acq_rel monotonic
+  %pair = cmpxchg ptr %p, i32 %cmp, i32 %new acq_rel monotonic
   %val = extractvalue { i32, i1 } %pair, 0
   ret i32 %val
 }
 
-define i64 @val_compare_and_swap_64(i64* %p, i64 %cmp, i64 %new) #0 {
+define i64 @val_compare_and_swap_64(ptr %p, i64 %cmp, i64 %new) #0 {
 ; CHECK-NOLSE-O1-LABEL: val_compare_and_swap_64:
 ; CHECK-NOLSE-O1:       ; %bb.0:
 ; CHECK-NOLSE-O1-NEXT:  LBB3_1: ; %cmpxchg.start
@@ -204,12 +204,12 @@ define i64 @val_compare_and_swap_64(i64* %p, i64 %cmp, i64 %new) #0 {
 ; CHECK-LSE-O0-NEXT:    mov x0, x1
 ; CHECK-LSE-O0-NEXT:    cas x0, x2, [x8]
 ; CHECK-LSE-O0-NEXT:    ret
-  %pair = cmpxchg i64* %p, i64 %cmp, i64 %new monotonic monotonic
+  %pair = cmpxchg ptr %p, i64 %cmp, i64 %new monotonic monotonic
   %val = extractvalue { i64, i1 } %pair, 0
   ret i64 %val
 }
 
-define i64 @val_compare_and_swap_64_monotonic_seqcst(i64* %p, i64 %cmp, i64 %new) #0 {
+define i64 @val_compare_and_swap_64_monotonic_seqcst(ptr %p, i64 %cmp, i64 %new) #0 {
 ; CHECK-NOLSE-O1-LABEL: val_compare_and_swap_64_monotonic_seqcst:
 ; CHECK-NOLSE-O1:       ; %bb.0:
 ; CHECK-NOLSE-O1-NEXT:  LBB4_1: ; %cmpxchg.start
@@ -254,12 +254,12 @@ define i64 @val_compare_and_swap_64_monotonic_seqcst(i64* %p, i64 %cmp, i64 %new
 ; CHECK-LSE-O0-NEXT:    mov x0, x1
 ; CHECK-LSE-O0-NEXT:    casal x0, x2, [x8]
 ; CHECK-LSE-O0-NEXT:    ret
-  %pair = cmpxchg i64* %p, i64 %cmp, i64 %new monotonic seq_cst
+  %pair = cmpxchg ptr %p, i64 %cmp, i64 %new monotonic seq_cst
   %val = extractvalue { i64, i1 } %pair, 0
   ret i64 %val
 }
 
-define i64 @val_compare_and_swap_64_release_acquire(i64* %p, i64 %cmp, i64 %new) #0 {
+define i64 @val_compare_and_swap_64_release_acquire(ptr %p, i64 %cmp, i64 %new) #0 {
 ; CHECK-NOLSE-O1-LABEL: val_compare_and_swap_64_release_acquire:
 ; CHECK-NOLSE-O1:       ; %bb.0:
 ; CHECK-NOLSE-O1-NEXT:  LBB5_1: ; %cmpxchg.start
@@ -304,12 +304,12 @@ define i64 @val_compare_and_swap_64_release_acquire(i64* %p, i64 %cmp, i64 %new)
 ; CHECK-LSE-O0-NEXT:    mov x0, x1
 ; CHECK-LSE-O0-NEXT:    casal x0, x2, [x8]
 ; CHECK-LSE-O0-NEXT:    ret
-  %pair = cmpxchg i64* %p, i64 %cmp, i64 %new release acquire
+  %pair = cmpxchg ptr %p, i64 %cmp, i64 %new release acquire
   %val = extractvalue { i64, i1 } %pair, 0
   ret i64 %val
 }
 
-define i32 @fetch_and_nand(i32* %p) #0 {
+define i32 @fetch_and_nand(ptr %p) #0 {
 ; CHECK-NOLSE-O1-LABEL: fetch_and_nand:
 ; CHECK-NOLSE-O1:       ; %bb.0:
 ; CHECK-NOLSE-O1-NEXT:  LBB6_1: ; %atomicrmw.start
@@ -398,11 +398,11 @@ define i32 @fetch_and_nand(i32* %p) #0 {
 ; CHECK-LSE-O0-NEXT:    ldr w0, [sp, #12] ; 4-byte Folded Reload
 ; CHECK-LSE-O0-NEXT:    add sp, sp, #32
 ; CHECK-LSE-O0-NEXT:    ret
-  %val = atomicrmw nand i32* %p, i32 7 release
+  %val = atomicrmw nand ptr %p, i32 7 release
   ret i32 %val
 }
 
-define i64 @fetch_and_nand_64(i64* %p) #0 {
+define i64 @fetch_and_nand_64(ptr %p) #0 {
 ; CHECK-NOLSE-O1-LABEL: fetch_and_nand_64:
 ; CHECK-NOLSE-O1:       ; %bb.0:
 ; CHECK-NOLSE-O1-NEXT:  LBB7_1: ; %atomicrmw.start
@@ -491,11 +491,11 @@ define i64 @fetch_and_nand_64(i64* %p) #0 {
 ; CHECK-LSE-O0-NEXT:    ldr x0, [sp, #8] ; 8-byte Folded Reload
 ; CHECK-LSE-O0-NEXT:    add sp, sp, #32
 ; CHECK-LSE-O0-NEXT:    ret
-  %val = atomicrmw nand i64* %p, i64 7 acq_rel
+  %val = atomicrmw nand ptr %p, i64 7 acq_rel
   ret i64 %val
 }
 
-define i32 @fetch_and_or(i32* %p) #0 {
+define i32 @fetch_and_or(ptr %p) #0 {
 ; CHECK-NOLSE-O1-LABEL: fetch_and_or:
 ; CHECK-NOLSE-O1:       ; %bb.0:
 ; CHECK-NOLSE-O1-NEXT:    mov w9, #5
@@ -557,11 +557,11 @@ define i32 @fetch_and_or(i32* %p) #0 {
 ; CHECK-LSE-O0-NEXT:    mov w8, #5
 ; CHECK-LSE-O0-NEXT:    ldsetal w8, w0, [x0]
 ; CHECK-LSE-O0-NEXT:    ret
-  %val = atomicrmw or i32* %p, i32 5 seq_cst
+  %val = atomicrmw or ptr %p, i32 5 seq_cst
   ret i32 %val
 }
 
-define i64 @fetch_and_or_64(i64* %p) #0 {
+define i64 @fetch_and_or_64(ptr %p) #0 {
 ; CHECK-NOLSE-O1-LABEL: fetch_and_or_64:
 ; CHECK-NOLSE-O1:       ; %bb.0:
 ; CHECK-NOLSE-O1-NEXT:  LBB9_1: ; %atomicrmw.start
@@ -622,7 +622,7 @@ define i64 @fetch_and_or_64(i64* %p) #0 {
 ; CHECK-LSE-O0-NEXT:    ; kill: def $x8 killed $w8
 ; CHECK-LSE-O0-NEXT:    ldset x8, x0, [x0]
 ; CHECK-LSE-O0-NEXT:    ret
-  %val = atomicrmw or i64* %p, i64 7 monotonic
+  %val = atomicrmw or ptr %p, i64 7 monotonic
   ret i64 %val
 }
 
@@ -683,7 +683,7 @@ define void @seq_cst_fence() #0 {
    ret void
 }
 
-define i32 @atomic_load(i32* %p) #0 {
+define i32 @atomic_load(ptr %p) #0 {
 ; CHECK-NOLSE-LABEL: atomic_load:
 ; CHECK-NOLSE:       ; %bb.0:
 ; CHECK-NOLSE-NEXT:    ldar w0, [x0]
@@ -698,11 +698,11 @@ define i32 @atomic_load(i32* %p) #0 {
 ; CHECK-LSE-O0:       ; %bb.0:
 ; CHECK-LSE-O0-NEXT:    ldar w0, [x0]
 ; CHECK-LSE-O0-NEXT:    ret
-   %r = load atomic i32, i32* %p seq_cst, align 4
+   %r = load atomic i32, ptr %p seq_cst, align 4
    ret i32 %r
 }
 
-define i8 @atomic_load_relaxed_8(i8* %p, i32 %off32) #0 {
+define i8 @atomic_load_relaxed_8(ptr %p, i32 %off32) #0 {
 ; CHECK-NOLSE-O1-LABEL: atomic_load_relaxed_8:
 ; CHECK-NOLSE-O1:       ; %bb.0:
 ; CHECK-NOLSE-O1-NEXT:    add x8, x0, #291, lsl #12 ; =1191936
@@ -754,25 +754,25 @@ define i8 @atomic_load_relaxed_8(i8* %p, i32 %off32) #0 {
 ; CHECK-LSE-O0-NEXT:    ldrb w9, [x9]
 ; CHECK-LSE-O0-NEXT:    add w0, w8, w9, uxtb
 ; CHECK-LSE-O0-NEXT:    ret
-  %ptr_unsigned = getelementptr i8, i8* %p, i32 4095
-  %val_unsigned = load atomic i8, i8* %ptr_unsigned monotonic, align 1
+  %ptr_unsigned = getelementptr i8, ptr %p, i32 4095
+  %val_unsigned = load atomic i8, ptr %ptr_unsigned monotonic, align 1
 
-  %ptr_regoff = getelementptr i8, i8* %p, i32 %off32
-  %val_regoff = load atomic i8, i8* %ptr_regoff unordered, align 1
+  %ptr_regoff = getelementptr i8, ptr %p, i32 %off32
+  %val_regoff = load atomic i8, ptr %ptr_regoff unordered, align 1
   %tot1 = add i8 %val_unsigned, %val_regoff
 
-  %ptr_unscaled = getelementptr i8, i8* %p, i32 -256
-  %val_unscaled = load atomic i8, i8* %ptr_unscaled monotonic, align 1
+  %ptr_unscaled = getelementptr i8, ptr %p, i32 -256
+  %val_unscaled = load atomic i8, ptr %ptr_unscaled monotonic, align 1
   %tot2 = add i8 %tot1, %val_unscaled
 
-  %ptr_random = getelementptr i8, i8* %p, i32 1191936 ; 0x123000 (i.e. ADD imm)
-  %val_random = load atomic i8, i8* %ptr_random unordered, align 1
+  %ptr_random = getelementptr i8, ptr %p, i32 1191936 ; 0x123000 (i.e. ADD imm)
+  %val_random = load atomic i8, ptr %ptr_random unordered, align 1
   %tot3 = add i8 %tot2, %val_random
 
   ret i8 %tot3
 }
 
-define i16 @atomic_load_relaxed_16(i16* %p, i32 %off32) #0 {
+define i16 @atomic_load_relaxed_16(ptr %p, i32 %off32) #0 {
 ; CHECK-NOLSE-O1-LABEL: atomic_load_relaxed_16:
 ; CHECK-NOLSE-O1:       ; %bb.0:
 ; CHECK-NOLSE-O1-NEXT:    add x8, x0, #291, lsl #12 ; =1191936
@@ -824,25 +824,25 @@ define i16 @atomic_load_relaxed_16(i16* %p, i32 %off32) #0 {
 ; CHECK-LSE-O0-NEXT:    ldrh w9, [x9]
 ; CHECK-LSE-O0-NEXT:    add w0, w8, w9, uxth
 ; CHECK-LSE-O0-NEXT:    ret
-  %ptr_unsigned = getelementptr i16, i16* %p, i32 4095
-  %val_unsigned = load atomic i16, i16* %ptr_unsigned monotonic, align 2
+  %ptr_unsigned = getelementptr i16, ptr %p, i32 4095
+  %val_unsigned = load atomic i16, ptr %ptr_unsigned monotonic, align 2
 
-  %ptr_regoff = getelementptr i16, i16* %p, i32 %off32
-  %val_regoff = load atomic i16, i16* %ptr_regoff unordered, align 2
+  %ptr_regoff = getelementptr i16, ptr %p, i32 %off32
+  %val_regoff = load atomic i16, ptr %ptr_regoff unordered, align 2
   %tot1 = add i16 %val_unsigned, %val_regoff
 
-  %ptr_unscaled = getelementptr i16, i16* %p, i32 -128
-  %val_unscaled = load atomic i16, i16* %ptr_unscaled monotonic, align 2
+  %ptr_unscaled = getelementptr i16, ptr %p, i32 -128
+  %val_unscaled = load atomic i16, ptr %ptr_unscaled monotonic, align 2
   %tot2 = add i16 %tot1, %val_unscaled
 
-  %ptr_random = getelementptr i16, i16* %p, i32 595968 ; 0x123000/2 (i.e. ADD imm)
-  %val_random = load atomic i16, i16* %ptr_random unordered, align 2
+  %ptr_random = getelementptr i16, ptr %p, i32 595968 ; 0x123000/2 (i.e. ADD imm)
+  %val_random = load atomic i16, ptr %ptr_random unordered, align 2
   %tot3 = add i16 %tot2, %val_random
 
   ret i16 %tot3
 }
 
-define i32 @atomic_load_relaxed_32(i32* %p, i32 %off32) #0 {
+define i32 @atomic_load_relaxed_32(ptr %p, i32 %off32) #0 {
 ; CHECK-NOLSE-O1-LABEL: atomic_load_relaxed_32:
 ; CHECK-NOLSE-O1:       ; %bb.0:
 ; CHECK-NOLSE-O1-NEXT:    add x8, x0, #291, lsl #12 ; =1191936
@@ -890,25 +890,25 @@ define i32 @atomic_load_relaxed_32(i32* %p, i32 %off32) #0 {
 ; CHECK-LSE-O0-NEXT:    ldr w9, [x9]
 ; CHECK-LSE-O0-NEXT:    add w0, w8, w9
 ; CHECK-LSE-O0-NEXT:    ret
-  %ptr_unsigned = getelementptr i32, i32* %p, i32 4095
-  %val_unsigned = load atomic i32, i32* %ptr_unsigned monotonic, align 4
+  %ptr_unsigned = getelementptr i32, ptr %p, i32 4095
+  %val_unsigned = load atomic i32, ptr %ptr_unsigned monotonic, align 4
 
-  %ptr_regoff = getelementptr i32, i32* %p, i32 %off32
-  %val_regoff = load atomic i32, i32* %ptr_regoff unordered, align 4
+  %ptr_regoff = getelementptr i32, ptr %p, i32 %off32
+  %val_regoff = load atomic i32, ptr %ptr_regoff unordered, align 4
   %tot1 = add i32 %val_unsigned, %val_regoff
 
-  %ptr_unscaled = getelementptr i32, i32* %p, i32 -64
-  %val_unscaled = load atomic i32, i32* %ptr_unscaled monotonic, align 4
+  %ptr_unscaled = getelementptr i32, ptr %p, i32 -64
+  %val_unscaled = load atomic i32, ptr %ptr_unscaled monotonic, align 4
   %tot2 = add i32 %tot1, %val_unscaled
 
-  %ptr_random = getelementptr i32, i32* %p, i32 297984 ; 0x123000/4 (i.e. ADD imm)
-  %val_random = load atomic i32, i32* %ptr_random unordered, align 4
+  %ptr_random = getelementptr i32, ptr %p, i32 297984 ; 0x123000/4 (i.e. ADD imm)
+  %val_random = load atomic i32, ptr %ptr_random unordered, align 4
   %tot3 = add i32 %tot2, %val_random
 
   ret i32 %tot3
 }
 
-define i64 @atomic_load_relaxed_64(i64* %p, i32 %off32) #0 {
+define i64 @atomic_load_relaxed_64(ptr %p, i32 %off32) #0 {
 ; CHECK-NOLSE-O1-LABEL: atomic_load_relaxed_64:
 ; CHECK-NOLSE-O1:       ; %bb.0:
 ; CHECK-NOLSE-O1-NEXT:    add x8, x0, #291, lsl #12 ; =1191936
@@ -956,26 +956,26 @@ define i64 @atomic_load_relaxed_64(i64* %p, i32 %off32) #0 {
 ; CHECK-LSE-O0-NEXT:    ldr x9, [x9]
 ; CHECK-LSE-O0-NEXT:    add x0, x8, x9
 ; CHECK-LSE-O0-NEXT:    ret
-  %ptr_unsigned = getelementptr i64, i64* %p, i32 4095
-  %val_unsigned = load atomic i64, i64* %ptr_unsigned monotonic, align 8
+  %ptr_unsigned = getelementptr i64, ptr %p, i32 4095
+  %val_unsigned = load atomic i64, ptr %ptr_unsigned monotonic, align 8
 
-  %ptr_regoff = getelementptr i64, i64* %p, i32 %off32
-  %val_regoff = load atomic i64, i64* %ptr_regoff unordered, align 8
+  %ptr_regoff = getelementptr i64, ptr %p, i32 %off32
+  %val_regoff = load atomic i64, ptr %ptr_regoff unordered, align 8
   %tot1 = add i64 %val_unsigned, %val_regoff
 
-  %ptr_unscaled = getelementptr i64, i64* %p, i32 -32
-  %val_unscaled = load atomic i64, i64* %ptr_unscaled monotonic, align 8
+  %ptr_unscaled = getelementptr i64, ptr %p, i32 -32
+  %val_unscaled = load atomic i64, ptr %ptr_unscaled monotonic, align 8
   %tot2 = add i64 %tot1, %val_unscaled
 
-  %ptr_random = getelementptr i64, i64* %p, i32 148992 ; 0x123000/8 (i.e. ADD imm)
-  %val_random = load atomic i64, i64* %ptr_random unordered, align 8
+  %ptr_random = getelementptr i64, ptr %p, i32 148992 ; 0x123000/8 (i.e. ADD imm)
+  %val_random = load atomic i64, ptr %ptr_random unordered, align 8
   %tot3 = add i64 %tot2, %val_random
 
   ret i64 %tot3
 }
 
 
-define void @atomc_store(i32* %p) #0 {
+define void @atomc_store(ptr %p) #0 {
 ; CHECK-NOLSE-LABEL: atomc_store:
 ; CHECK-NOLSE:       ; %bb.0:
 ; CHECK-NOLSE-NEXT:    mov w8, #4
@@ -993,11 +993,11 @@ define void @atomc_store(i32* %p) #0 {
 ; CHECK-LSE-O0-NEXT:    mov w8, #4
 ; CHECK-LSE-O0-NEXT:    stlr w8, [x0]
 ; CHECK-LSE-O0-NEXT:    ret
-   store atomic i32 4, i32* %p seq_cst, align 4
+   store atomic i32 4, ptr %p seq_cst, align 4
    ret void
 }
 
-define void @atomic_store_relaxed_8(i8* %p, i32 %off32, i8 %val) #0 {
+define void @atomic_store_relaxed_8(ptr %p, i32 %off32, i8 %val) #0 {
 ; CHECK-NOLSE-O1-LABEL: atomic_store_relaxed_8:
 ; CHECK-NOLSE-O1:       ; %bb.0:
 ; CHECK-NOLSE-O1-NEXT:    add x8, x0, #291, lsl #12 ; =1191936
@@ -1033,22 +1033,22 @@ define void @atomic_store_relaxed_8(i8* %p, i32 %off32, i8 %val) #0 {
 ; CHECK-LSE-O0-NEXT:    add x8, x0, #291, lsl #12 ; =1191936
 ; CHECK-LSE-O0-NEXT:    strb w2, [x8]
 ; CHECK-LSE-O0-NEXT:    ret
-  %ptr_unsigned = getelementptr i8, i8* %p, i32 4095
-  store atomic i8 %val, i8* %ptr_unsigned monotonic, align 1
+  %ptr_unsigned = getelementptr i8, ptr %p, i32 4095
+  store atomic i8 %val, ptr %ptr_unsigned monotonic, align 1
 
-  %ptr_regoff = getelementptr i8, i8* %p, i32 %off32
-  store atomic i8 %val, i8* %ptr_regoff unordered, align 1
+  %ptr_regoff = getelementptr i8, ptr %p, i32 %off32
+  store atomic i8 %val, ptr %ptr_regoff unordered, align 1
 
-  %ptr_unscaled = getelementptr i8, i8* %p, i32 -256
-  store atomic i8 %val, i8* %ptr_unscaled monotonic, align 1
+  %ptr_unscaled = getelementptr i8, ptr %p, i32 -256
+  store atomic i8 %val, ptr %ptr_unscaled monotonic, align 1
 
-  %ptr_random = getelementptr i8, i8* %p, i32 1191936 ; 0x123000 (i.e. ADD imm)
-  store atomic i8 %val, i8* %ptr_random unordered, align 1
+  %ptr_random = getelementptr i8, ptr %p, i32 1191936 ; 0x123000 (i.e. ADD imm)
+  store atomic i8 %val, ptr %ptr_random unordered, align 1
 
   ret void
 }
 
-define void @atomic_store_relaxed_16(i16* %p, i32 %off32, i16 %val) #0 {
+define void @atomic_store_relaxed_16(ptr %p, i32 %off32, i16 %val) #0 {
 ; CHECK-NOLSE-O1-LABEL: atomic_store_relaxed_16:
 ; CHECK-NOLSE-O1:       ; %bb.0:
 ; CHECK-NOLSE-O1-NEXT:    add x8, x0, #291, lsl #12 ; =1191936
@@ -1084,22 +1084,22 @@ define void @atomic_store_relaxed_16(i16* %p, i32 %off32, i16 %val) #0 {
 ; CHECK-LSE-O0-NEXT:    add x8, x0, #291, lsl #12 ; =1191936
 ; CHECK-LSE-O0-NEXT:    strh w2, [x8]
 ; CHECK-LSE-O0-NEXT:    ret
-  %ptr_unsigned = getelementptr i16, i16* %p, i32 4095
-  store atomic i16 %val, i16* %ptr_unsigned monotonic, align 2
+  %ptr_unsigned = getelementptr i16, ptr %p, i32 4095
+  store atomic i16 %val, ptr %ptr_unsigned monotonic, align 2
 
-  %ptr_regoff = getelementptr i16, i16* %p, i32 %off32
-  store atomic i16 %val, i16* %ptr_regoff unordered, align 2
+  %ptr_regoff = getelementptr i16, ptr %p, i32 %off32
+  store atomic i16 %val, ptr %ptr_regoff unordered, align 2
 
-  %ptr_unscaled = getelementptr i16, i16* %p, i32 -128
-  store atomic i16 %val, i16* %ptr_unscaled monotonic, align 2
+  %ptr_unscaled = getelementptr i16, ptr %p, i32 -128
+  store atomic i16 %val, ptr %ptr_unscaled monotonic, align 2
 
-  %ptr_random = getelementptr i16, i16* %p, i32 595968 ; 0x123000/2 (i.e. ADD imm)
-  store atomic i16 %val, i16* %ptr_random unordered, align 2
+  %ptr_random = getelementptr i16, ptr %p, i32 595968 ; 0x123000/2 (i.e. ADD imm)
+  store atomic i16 %val, ptr %ptr_random unordered, align 2
 
   ret void
 }
 
-define void @atomic_store_relaxed_32(i32* %p, i32 %off32, i32 %val) #0 {
+define void @atomic_store_relaxed_32(ptr %p, i32 %off32, i32 %val) #0 {
 ; CHECK-NOLSE-O1-LABEL: atomic_store_relaxed_32:
 ; CHECK-NOLSE-O1:       ; %bb.0:
 ; CHECK-NOLSE-O1-NEXT:    add x8, x0, #291, lsl #12 ; =1191936
@@ -1135,22 +1135,22 @@ define void @atomic_store_relaxed_32(i32* %p, i32 %off32, i32 %val) #0 {
 ; CHECK-LSE-O0-NEXT:    add x8, x0, #291, lsl #12 ; =1191936
 ; CHECK-LSE-O0-NEXT:    str w2, [x8]
 ; CHECK-LSE-O0-NEXT:    ret
-  %ptr_unsigned = getelementptr i32, i32* %p, i32 4095
-  store atomic i32 %val, i32* %ptr_unsigned monotonic, align 4
+  %ptr_unsigned = getelementptr i32, ptr %p, i32 4095
+  store atomic i32 %val, ptr %ptr_unsigned monotonic, align 4
 
-  %ptr_regoff = getelementptr i32, i32* %p, i32 %off32
-  store atomic i32 %val, i32* %ptr_regoff unordered, align 4
+  %ptr_regoff = getelementptr i32, ptr %p, i32 %off32
+  store atomic i32 %val, ptr %ptr_regoff unordered, align 4
 
-  %ptr_unscaled = getelementptr i32, i32* %p, i32 -64
-  store atomic i32 %val, i32* %ptr_unscaled monotonic, align 4
+  %ptr_unscaled = getelementptr i32, ptr %p, i32 -64
+  store atomic i32 %val, ptr %ptr_unscaled monotonic, align 4
 
-  %ptr_random = getelementptr i32, i32* %p, i32 297984 ; 0x123000/4 (i.e. ADD imm)
-  store atomic i32 %val, i32* %ptr_random unordered, align 4
+  %ptr_random = getelementptr i32, ptr %p, i32 297984 ; 0x123000/4 (i.e. ADD imm)
+  store atomic i32 %val, ptr %ptr_random unordered, align 4
 
   ret void
 }
 
-define void @atomic_store_relaxed_64(i64* %p, i32 %off32, i64 %val) #0 {
+define void @atomic_store_relaxed_64(ptr %p, i32 %off32, i64 %val) #0 {
 ; CHECK-NOLSE-O1-LABEL: atomic_store_relaxed_64:
 ; CHECK-NOLSE-O1:       ; %bb.0:
 ; CHECK-NOLSE-O1-NEXT:    add x8, x0, #291, lsl #12 ; =1191936
@@ -1186,22 +1186,22 @@ define void @atomic_store_relaxed_64(i64* %p, i32 %off32, i64 %val) #0 {
 ; CHECK-LSE-O0-NEXT:    add x8, x0, #291, lsl #12 ; =1191936
 ; CHECK-LSE-O0-NEXT:    str x2, [x8]
 ; CHECK-LSE-O0-NEXT:    ret
-  %ptr_unsigned = getelementptr i64, i64* %p, i32 4095
-  store atomic i64 %val, i64* %ptr_unsigned monotonic, align 8
+  %ptr_unsigned = getelementptr i64, ptr %p, i32 4095
+  store atomic i64 %val, ptr %ptr_unsigned monotonic, align 8
 
-  %ptr_regoff = getelementptr i64, i64* %p, i32 %off32
-  store atomic i64 %val, i64* %ptr_regoff unordered, align 8
+  %ptr_regoff = getelementptr i64, ptr %p, i32 %off32
+  store atomic i64 %val, ptr %ptr_regoff unordered, align 8
 
-  %ptr_unscaled = getelementptr i64, i64* %p, i32 -32
-  store atomic i64 %val, i64* %ptr_unscaled monotonic, align 8
+  %ptr_unscaled = getelementptr i64, ptr %p, i32 -32
+  store atomic i64 %val, ptr %ptr_unscaled monotonic, align 8
 
-  %ptr_random = getelementptr i64, i64* %p, i32 148992 ; 0x123000/8 (i.e. ADD imm)
-  store atomic i64 %val, i64* %ptr_random unordered, align 8
+  %ptr_random = getelementptr i64, ptr %p, i32 148992 ; 0x123000/8 (i.e. ADD imm)
+  store atomic i64 %val, ptr %ptr_random unordered, align 8
 
   ret void
 }
 
-define i32 @load_zext(i8* %p8, i16* %p16) {
+define i32 @load_zext(ptr %p8, ptr %p16) {
 ; CHECK-NOLSE-O1-LABEL: load_zext:
 ; CHECK-NOLSE-O1:       ; %bb.0:
 ; CHECK-NOLSE-O1-NEXT:    ldarb w8, [x0]
@@ -1229,17 +1229,17 @@ define i32 @load_zext(i8* %p8, i16* %p16) {
 ; CHECK-LSE-O0-NEXT:    ldrh w8, [x1]
 ; CHECK-LSE-O0-NEXT:    add w0, w8, w9, uxtb
 ; CHECK-LSE-O0-NEXT:    ret
-  %val1.8 = load atomic i8, i8* %p8 acquire, align 1
+  %val1.8 = load atomic i8, ptr %p8 acquire, align 1
   %val1 = zext i8 %val1.8 to i32
 
-  %val2.16 = load atomic i16, i16* %p16 unordered, align 2
+  %val2.16 = load atomic i16, ptr %p16 unordered, align 2
   %val2 = zext i16 %val2.16 to i32
 
   %res = add i32 %val1, %val2
   ret i32 %res
 }
 
-define { i32, i64 } @load_acq(i32* %p32, i64* %p64) {
+define { i32, i64 } @load_acq(ptr %p32, ptr %p64) {
 ; CHECK-NOLSE-LABEL: load_acq:
 ; CHECK-NOLSE:       ; %bb.0:
 ; CHECK-NOLSE-NEXT:    ldar w0, [x0]
@@ -1257,16 +1257,16 @@ define { i32, i64 } @load_acq(i32* %p32, i64* %p64) {
 ; CHECK-LSE-O0-NEXT:    ldar w0, [x0]
 ; CHECK-LSE-O0-NEXT:    ldapr x1, [x1]
 ; CHECK-LSE-O0-NEXT:    ret
-  %val32 = load atomic i32, i32* %p32 seq_cst, align 4
+  %val32 = load atomic i32, ptr %p32 seq_cst, align 4
   %tmp = insertvalue { i32, i64 } undef, i32 %val32, 0
 
-  %val64 = load atomic i64, i64* %p64 acquire, align 8
+  %val64 = load atomic i64, ptr %p64 acquire, align 8
   %res = insertvalue { i32, i64 } %tmp, i64 %val64, 1
 
   ret { i32, i64 } %res
 }
 
-define i32 @load_sext(i8* %p8, i16* %p16) {
+define i32 @load_sext(ptr %p8, ptr %p16) {
 ; CHECK-NOLSE-O1-LABEL: load_sext:
 ; CHECK-NOLSE-O1:       ; %bb.0:
 ; CHECK-NOLSE-O1-NEXT:    ldarb w8, [x0]
@@ -1298,17 +1298,17 @@ define i32 @load_sext(i8* %p8, i16* %p16) {
 ; CHECK-LSE-O0-NEXT:    sxth w8, w8
 ; CHECK-LSE-O0-NEXT:    add w0, w8, w9, sxtb
 ; CHECK-LSE-O0-NEXT:    ret
-  %val1.8 = load atomic i8, i8* %p8 acquire, align 1
+  %val1.8 = load atomic i8, ptr %p8 acquire, align 1
   %val1 = sext i8 %val1.8 to i32
 
-  %val2.16 = load atomic i16, i16* %p16 unordered, align 2
+  %val2.16 = load atomic i16, ptr %p16 unordered, align 2
   %val2 = sext i16 %val2.16 to i32
 
   %res = add i32 %val1, %val2
   ret i32 %res
 }
 
-define void @store_trunc(i32 %val, i8* %p8, i16* %p16) {
+define void @store_trunc(i32 %val, ptr %p8, ptr %p16) {
 ; CHECK-NOLSE-LABEL: store_trunc:
 ; CHECK-NOLSE:       ; %bb.0:
 ; CHECK-NOLSE-NEXT:    stlrb w0, [x1]
@@ -1327,15 +1327,15 @@ define void @store_trunc(i32 %val, i8* %p8, i16* %p16) {
 ; CHECK-LSE-O0-NEXT:    strh w0, [x2]
 ; CHECK-LSE-O0-NEXT:    ret
   %val8 = trunc i32 %val to i8
-  store atomic i8 %val8, i8* %p8 seq_cst, align 1
+  store atomic i8 %val8, ptr %p8 seq_cst, align 1
 
   %val16 = trunc i32 %val to i16
-  store atomic i16 %val16, i16* %p16 monotonic, align 2
+  store atomic i16 %val16, ptr %p16 monotonic, align 2
 
   ret void
 }
 
-define i8 @atomicrmw_add_i8(i8* %ptr, i8 %rhs) {
+define i8 @atomicrmw_add_i8(ptr %ptr, i8 %rhs) {
 ; CHECK-NOLSE-O1-LABEL: atomicrmw_add_i8:
 ; CHECK-NOLSE-O1:       ; %bb.0:
 ; CHECK-NOLSE-O1-NEXT:  LBB27_1: ; %atomicrmw.start
@@ -1397,11 +1397,11 @@ define i8 @atomicrmw_add_i8(i8* %ptr, i8 %rhs) {
 ; CHECK-LSE-O0:       ; %bb.0:
 ; CHECK-LSE-O0-NEXT:    ldaddalb w1, w0, [x0]
 ; CHECK-LSE-O0-NEXT:    ret
-  %res = atomicrmw add i8* %ptr, i8 %rhs seq_cst
+  %res = atomicrmw add ptr %ptr, i8 %rhs seq_cst
   ret i8 %res
 }
 
-define i8 @atomicrmw_xchg_i8(i8* %ptr, i8 %rhs) {
+define i8 @atomicrmw_xchg_i8(ptr %ptr, i8 %rhs) {
 ; CHECK-NOLSE-O1-LABEL: atomicrmw_xchg_i8:
 ; CHECK-NOLSE-O1:       ; %bb.0:
 ; CHECK-NOLSE-O1-NEXT:    ; kill: def $w1 killed $w1 def $x1
@@ -1462,11 +1462,11 @@ define i8 @atomicrmw_xchg_i8(i8* %ptr, i8 %rhs) {
 ; CHECK-LSE-O0:       ; %bb.0:
 ; CHECK-LSE-O0-NEXT:    swpb w1, w0, [x0]
 ; CHECK-LSE-O0-NEXT:    ret
-  %res = atomicrmw xchg i8* %ptr, i8 %rhs monotonic
+  %res = atomicrmw xchg ptr %ptr, i8 %rhs monotonic
   ret i8 %res
 }
 
-define i8 @atomicrmw_sub_i8(i8* %ptr, i8 %rhs) {
+define i8 @atomicrmw_sub_i8(ptr %ptr, i8 %rhs) {
 ; CHECK-NOLSE-O1-LABEL: atomicrmw_sub_i8:
 ; CHECK-NOLSE-O1:       ; %bb.0:
 ; CHECK-NOLSE-O1-NEXT:  LBB29_1: ; %atomicrmw.start
@@ -1530,11 +1530,11 @@ define i8 @atomicrmw_sub_i8(i8* %ptr, i8 %rhs) {
 ; CHECK-LSE-O0-NEXT:    neg w8, w1
 ; CHECK-LSE-O0-NEXT:    ldaddab w8, w0, [x0]
 ; CHECK-LSE-O0-NEXT:    ret
-  %res = atomicrmw sub i8* %ptr, i8 %rhs acquire
+  %res = atomicrmw sub ptr %ptr, i8 %rhs acquire
   ret i8 %res
 }
 
-define i8 @atomicrmw_and_i8(i8* %ptr, i8 %rhs) {
+define i8 @atomicrmw_and_i8(ptr %ptr, i8 %rhs) {
 ; CHECK-NOLSE-O1-LABEL: atomicrmw_and_i8:
 ; CHECK-NOLSE-O1:       ; %bb.0:
 ; CHECK-NOLSE-O1-NEXT:  LBB30_1: ; %atomicrmw.start
@@ -1598,11 +1598,11 @@ define i8 @atomicrmw_and_i8(i8* %ptr, i8 %rhs) {
 ; CHECK-LSE-O0-NEXT:    mvn w8, w1
 ; CHECK-LSE-O0-NEXT:    ldclrlb w8, w0, [x0]
 ; CHECK-LSE-O0-NEXT:    ret
-  %res = atomicrmw and i8* %ptr, i8 %rhs release
+  %res = atomicrmw and ptr %ptr, i8 %rhs release
   ret i8 %res
 }
 
-define i8 @atomicrmw_or_i8(i8* %ptr, i8 %rhs) {
+define i8 @atomicrmw_or_i8(ptr %ptr, i8 %rhs) {
 ; CHECK-NOLSE-O1-LABEL: atomicrmw_or_i8:
 ; CHECK-NOLSE-O1:       ; %bb.0:
 ; CHECK-NOLSE-O1-NEXT:  LBB31_1: ; %atomicrmw.start
@@ -1664,11 +1664,11 @@ define i8 @atomicrmw_or_i8(i8* %ptr, i8 %rhs) {
 ; CHECK-LSE-O0:       ; %bb.0:
 ; CHECK-LSE-O0-NEXT:    ldsetalb w1, w0, [x0]
 ; CHECK-LSE-O0-NEXT:    ret
-  %res = atomicrmw or i8* %ptr, i8 %rhs seq_cst
+  %res = atomicrmw or ptr %ptr, i8 %rhs seq_cst
   ret i8 %res
 }
 
-define i8 @atomicrmw_xor_i8(i8* %ptr, i8 %rhs) {
+define i8 @atomicrmw_xor_i8(ptr %ptr, i8 %rhs) {
 ; CHECK-NOLSE-O1-LABEL: atomicrmw_xor_i8:
 ; CHECK-NOLSE-O1:       ; %bb.0:
 ; CHECK-NOLSE-O1-NEXT:  LBB32_1: ; %atomicrmw.start
@@ -1730,11 +1730,11 @@ define i8 @atomicrmw_xor_i8(i8* %ptr, i8 %rhs) {
 ; CHECK-LSE-O0:       ; %bb.0:
 ; CHECK-LSE-O0-NEXT:    ldeorb w1, w0, [x0]
 ; CHECK-LSE-O0-NEXT:    ret
-  %res = atomicrmw xor i8* %ptr, i8 %rhs monotonic
+  %res = atomicrmw xor ptr %ptr, i8 %rhs monotonic
   ret i8 %res
 }
 
-define i8 @atomicrmw_min_i8(i8* %ptr, i8 %rhs) {
+define i8 @atomicrmw_min_i8(ptr %ptr, i8 %rhs) {
 ; CHECK-NOLSE-O1-LABEL: atomicrmw_min_i8:
 ; CHECK-NOLSE-O1:       ; %bb.0:
 ; CHECK-NOLSE-O1-NEXT:  LBB33_1: ; %atomicrmw.start
@@ -1803,11 +1803,11 @@ define i8 @atomicrmw_min_i8(i8* %ptr, i8 %rhs) {
 ; CHECK-LSE-O0:       ; %bb.0:
 ; CHECK-LSE-O0-NEXT:    ldsminab w1, w0, [x0]
 ; CHECK-LSE-O0-NEXT:    ret
-  %res = atomicrmw min i8* %ptr, i8 %rhs acquire
+  %res = atomicrmw min ptr %ptr, i8 %rhs acquire
   ret i8 %res
 }
 
-define i8 @atomicrmw_max_i8(i8* %ptr, i8 %rhs) {
+define i8 @atomicrmw_max_i8(ptr %ptr, i8 %rhs) {
 ; CHECK-NOLSE-O1-LABEL: atomicrmw_max_i8:
 ; CHECK-NOLSE-O1:       ; %bb.0:
 ; CHECK-NOLSE-O1-NEXT:  LBB34_1: ; %atomicrmw.start
@@ -1876,11 +1876,11 @@ define i8 @atomicrmw_max_i8(i8* %ptr, i8 %rhs) {
 ; CHECK-LSE-O0:       ; %bb.0:
 ; CHECK-LSE-O0-NEXT:    ldsmaxlb w1, w0, [x0]
 ; CHECK-LSE-O0-NEXT:    ret
-  %res = atomicrmw max i8* %ptr, i8 %rhs release
+  %res = atomicrmw max ptr %ptr, i8 %rhs release
   ret i8 %res
 }
 
-define i8 @atomicrmw_umin_i8(i8* %ptr, i8 %rhs) {
+define i8 @atomicrmw_umin_i8(ptr %ptr, i8 %rhs) {
 ; CHECK-NOLSE-O1-LABEL: atomicrmw_umin_i8:
 ; CHECK-NOLSE-O1:       ; %bb.0:
 ; CHECK-NOLSE-O1-NEXT:    and w9, w1, #0xff
@@ -1950,11 +1950,11 @@ define i8 @atomicrmw_umin_i8(i8* %ptr, i8 %rhs) {
 ; CHECK-LSE-O0:       ; %bb.0:
 ; CHECK-LSE-O0-NEXT:    lduminalb w1, w0, [x0]
 ; CHECK-LSE-O0-NEXT:    ret
-  %res = atomicrmw umin i8* %ptr, i8 %rhs seq_cst
+  %res = atomicrmw umin ptr %ptr, i8 %rhs seq_cst
   ret i8 %res
 }
 
-define i8 @atomicrmw_umax_i8(i8* %ptr, i8 %rhs) {
+define i8 @atomicrmw_umax_i8(ptr %ptr, i8 %rhs) {
 ; CHECK-NOLSE-O1-LABEL: atomicrmw_umax_i8:
 ; CHECK-NOLSE-O1:       ; %bb.0:
 ; CHECK-NOLSE-O1-NEXT:    and w9, w1, #0xff
@@ -2024,11 +2024,11 @@ define i8 @atomicrmw_umax_i8(i8* %ptr, i8 %rhs) {
 ; CHECK-LSE-O0:       ; %bb.0:
 ; CHECK-LSE-O0-NEXT:    ldumaxb w1, w0, [x0]
 ; CHECK-LSE-O0-NEXT:    ret
-  %res = atomicrmw umax i8* %ptr, i8 %rhs monotonic
+  %res = atomicrmw umax ptr %ptr, i8 %rhs monotonic
   ret i8 %res
 }
 
-define i16 @atomicrmw_add_i16(i16* %ptr, i16 %rhs) {
+define i16 @atomicrmw_add_i16(ptr %ptr, i16 %rhs) {
 ; CHECK-NOLSE-O1-LABEL: atomicrmw_add_i16:
 ; CHECK-NOLSE-O1:       ; %bb.0:
 ; CHECK-NOLSE-O1-NEXT:  LBB37_1: ; %atomicrmw.start
@@ -2090,11 +2090,11 @@ define i16 @atomicrmw_add_i16(i16* %ptr, i16 %rhs) {
 ; CHECK-LSE-O0:       ; %bb.0:
 ; CHECK-LSE-O0-NEXT:    ldaddalh w1, w0, [x0]
 ; CHECK-LSE-O0-NEXT:    ret
-  %res = atomicrmw add i16* %ptr, i16 %rhs seq_cst
+  %res = atomicrmw add ptr %ptr, i16 %rhs seq_cst
   ret i16 %res
 }
 
-define i16 @atomicrmw_xchg_i16(i16* %ptr, i16 %rhs) {
+define i16 @atomicrmw_xchg_i16(ptr %ptr, i16 %rhs) {
 ; CHECK-NOLSE-O1-LABEL: atomicrmw_xchg_i16:
 ; CHECK-NOLSE-O1:       ; %bb.0:
 ; CHECK-NOLSE-O1-NEXT:    ; kill: def $w1 killed $w1 def $x1
@@ -2155,11 +2155,11 @@ define i16 @atomicrmw_xchg_i16(i16* %ptr, i16 %rhs) {
 ; CHECK-LSE-O0:       ; %bb.0:
 ; CHECK-LSE-O0-NEXT:    swph w1, w0, [x0]
 ; CHECK-LSE-O0-NEXT:    ret
-  %res = atomicrmw xchg i16* %ptr, i16 %rhs monotonic
+  %res = atomicrmw xchg ptr %ptr, i16 %rhs monotonic
   ret i16 %res
 }
 
-define i16 @atomicrmw_sub_i16(i16* %ptr, i16 %rhs) {
+define i16 @atomicrmw_sub_i16(ptr %ptr, i16 %rhs) {
 ; CHECK-NOLSE-O1-LABEL: atomicrmw_sub_i16:
 ; CHECK-NOLSE-O1:       ; %bb.0:
 ; CHECK-NOLSE-O1-NEXT:  LBB39_1: ; %atomicrmw.start
@@ -2223,11 +2223,11 @@ define i16 @atomicrmw_sub_i16(i16* %ptr, i16 %rhs) {
 ; CHECK-LSE-O0-NEXT:    neg w8, w1
 ; CHECK-LSE-O0-NEXT:    ldaddah w8, w0, [x0]
 ; CHECK-LSE-O0-NEXT:    ret
-  %res = atomicrmw sub i16* %ptr, i16 %rhs acquire
+  %res = atomicrmw sub ptr %ptr, i16 %rhs acquire
   ret i16 %res
 }
 
-define i16 @atomicrmw_and_i16(i16* %ptr, i16 %rhs) {
+define i16 @atomicrmw_and_i16(ptr %ptr, i16 %rhs) {
 ; CHECK-NOLSE-O1-LABEL: atomicrmw_and_i16:
 ; CHECK-NOLSE-O1:       ; %bb.0:
 ; CHECK-NOLSE-O1-NEXT:  LBB40_1: ; %atomicrmw.start
@@ -2291,11 +2291,11 @@ define i16 @atomicrmw_and_i16(i16* %ptr, i16 %rhs) {
 ; CHECK-LSE-O0-NEXT:    mvn w8, w1
 ; CHECK-LSE-O0-NEXT:    ldclrlh w8, w0, [x0]
 ; CHECK-LSE-O0-NEXT:    ret
-  %res = atomicrmw and i16* %ptr, i16 %rhs release
+  %res = atomicrmw and ptr %ptr, i16 %rhs release
   ret i16 %res
 }
 
-define i16 @atomicrmw_or_i16(i16* %ptr, i16 %rhs) {
+define i16 @atomicrmw_or_i16(ptr %ptr, i16 %rhs) {
 ; CHECK-NOLSE-O1-LABEL: atomicrmw_or_i16:
 ; CHECK-NOLSE-O1:       ; %bb.0:
 ; CHECK-NOLSE-O1-NEXT:  LBB41_1: ; %atomicrmw.start
@@ -2357,11 +2357,11 @@ define i16 @atomicrmw_or_i16(i16* %ptr, i16 %rhs) {
 ; CHECK-LSE-O0:       ; %bb.0:
 ; CHECK-LSE-O0-NEXT:    ldsetalh w1, w0, [x0]
 ; CHECK-LSE-O0-NEXT:    ret
-  %res = atomicrmw or i16* %ptr, i16 %rhs seq_cst
+  %res = atomicrmw or ptr %ptr, i16 %rhs seq_cst
   ret i16 %res
 }
 
-define i16 @atomicrmw_xor_i16(i16* %ptr, i16 %rhs) {
+define i16 @atomicrmw_xor_i16(ptr %ptr, i16 %rhs) {
 ; CHECK-NOLSE-O1-LABEL: atomicrmw_xor_i16:
 ; CHECK-NOLSE-O1:       ; %bb.0:
 ; CHECK-NOLSE-O1-NEXT:  LBB42_1: ; %atomicrmw.start
@@ -2423,11 +2423,11 @@ define i16 @atomicrmw_xor_i16(i16* %ptr, i16 %rhs) {
 ; CHECK-LSE-O0:       ; %bb.0:
 ; CHECK-LSE-O0-NEXT:    ldeorh w1, w0, [x0]
 ; CHECK-LSE-O0-NEXT:    ret
-  %res = atomicrmw xor i16* %ptr, i16 %rhs monotonic
+  %res = atomicrmw xor ptr %ptr, i16 %rhs monotonic
   ret i16 %res
 }
 
-define i16 @atomicrmw_min_i16(i16* %ptr, i16 %rhs) {
+define i16 @atomicrmw_min_i16(ptr %ptr, i16 %rhs) {
 ; CHECK-NOLSE-O1-LABEL: atomicrmw_min_i16:
 ; CHECK-NOLSE-O1:       ; %bb.0:
 ; CHECK-NOLSE-O1-NEXT:  LBB43_1: ; %atomicrmw.start
@@ -2496,11 +2496,11 @@ define i16 @atomicrmw_min_i16(i16* %ptr, i16 %rhs) {
 ; CHECK-LSE-O0:       ; %bb.0:
 ; CHECK-LSE-O0-NEXT:    ldsminah w1, w0, [x0]
 ; CHECK-LSE-O0-NEXT:    ret
-  %res = atomicrmw min i16* %ptr, i16 %rhs acquire
+  %res = atomicrmw min ptr %ptr, i16 %rhs acquire
   ret i16 %res
 }
 
-define i16 @atomicrmw_max_i16(i16* %ptr, i16 %rhs) {
+define i16 @atomicrmw_max_i16(ptr %ptr, i16 %rhs) {
 ; CHECK-NOLSE-O1-LABEL: atomicrmw_max_i16:
 ; CHECK-NOLSE-O1:       ; %bb.0:
 ; CHECK-NOLSE-O1-NEXT:  LBB44_1: ; %atomicrmw.start
@@ -2569,11 +2569,11 @@ define i16 @atomicrmw_max_i16(i16* %ptr, i16 %rhs) {
 ; CHECK-LSE-O0:       ; %bb.0:
 ; CHECK-LSE-O0-NEXT:    ldsmaxlh w1, w0, [x0]
 ; CHECK-LSE-O0-NEXT:    ret
-  %res = atomicrmw max i16* %ptr, i16 %rhs release
+  %res = atomicrmw max ptr %ptr, i16 %rhs release
   ret i16 %res
 }
 
-define i16 @atomicrmw_umin_i16(i16* %ptr, i16 %rhs) {
+define i16 @atomicrmw_umin_i16(ptr %ptr, i16 %rhs) {
 ; CHECK-NOLSE-O1-LABEL: atomicrmw_umin_i16:
 ; CHECK-NOLSE-O1:       ; %bb.0:
 ; CHECK-NOLSE-O1-NEXT:    and w9, w1, #0xffff
@@ -2643,11 +2643,11 @@ define i16 @atomicrmw_umin_i16(i16* %ptr, i16 %rhs) {
 ; CHECK-LSE-O0:       ; %bb.0:
 ; CHECK-LSE-O0-NEXT:    lduminalh w1, w0, [x0]
 ; CHECK-LSE-O0-NEXT:    ret
-  %res = atomicrmw umin i16* %ptr, i16 %rhs seq_cst
+  %res = atomicrmw umin ptr %ptr, i16 %rhs seq_cst
   ret i16 %res
 }
 
-define i16 @atomicrmw_umax_i16(i16* %ptr, i16 %rhs) {
+define i16 @atomicrmw_umax_i16(ptr %ptr, i16 %rhs) {
 ; CHECK-NOLSE-O1-LABEL: atomicrmw_umax_i16:
 ; CHECK-NOLSE-O1:       ; %bb.0:
 ; CHECK-NOLSE-O1-NEXT:    and w9, w1, #0xffff
@@ -2717,11 +2717,11 @@ define i16 @atomicrmw_umax_i16(i16* %ptr, i16 %rhs) {
 ; CHECK-LSE-O0:       ; %bb.0:
 ; CHECK-LSE-O0-NEXT:    ldumaxh w1, w0, [x0]
 ; CHECK-LSE-O0-NEXT:    ret
-  %res = atomicrmw umax i16* %ptr, i16 %rhs monotonic
+  %res = atomicrmw umax ptr %ptr, i16 %rhs monotonic
   ret i16 %res
 }
 
-define { i8, i1 } @cmpxchg_i8(i8* %ptr, i8 %desired, i8 %new) {
+define { i8, i1 } @cmpxchg_i8(ptr %ptr, i8 %desired, i8 %new) {
 ; CHECK-NOLSE-O1-LABEL: cmpxchg_i8:
 ; CHECK-NOLSE-O1:       ; %bb.0:
 ; CHECK-NOLSE-O1-NEXT:    mov x8, x0
@@ -2783,11 +2783,11 @@ define { i8, i1 } @cmpxchg_i8(i8* %ptr, i8 %desired, i8 %new) {
 ; CHECK-LSE-O0-NEXT:    cset w8, eq
 ; CHECK-LSE-O0-NEXT:    and w1, w8, #0x1
 ; CHECK-LSE-O0-NEXT:    ret
-  %res = cmpxchg i8* %ptr, i8 %desired, i8 %new monotonic monotonic
+  %res = cmpxchg ptr %ptr, i8 %desired, i8 %new monotonic monotonic
   ret { i8, i1 } %res
 }
 
-define { i16, i1 } @cmpxchg_i16(i16* %ptr, i16 %desired, i16 %new) {
+define { i16, i1 } @cmpxchg_i16(ptr %ptr, i16 %desired, i16 %new) {
 ; CHECK-NOLSE-O1-LABEL: cmpxchg_i16:
 ; CHECK-NOLSE-O1:       ; %bb.0:
 ; CHECK-NOLSE-O1-NEXT:    mov x8, x0
@@ -2849,11 +2849,11 @@ define { i16, i1 } @cmpxchg_i16(i16* %ptr, i16 %desired, i16 %new) {
 ; CHECK-LSE-O0-NEXT:    cset w8, eq
 ; CHECK-LSE-O0-NEXT:    and w1, w8, #0x1
 ; CHECK-LSE-O0-NEXT:    ret
-  %res = cmpxchg i16* %ptr, i16 %desired, i16 %new monotonic monotonic
+  %res = cmpxchg ptr %ptr, i16 %desired, i16 %new monotonic monotonic
   ret { i16, i1 } %res
 }
 
-define internal double @bitcast_to_double(i64* %ptr) {
+define internal double @bitcast_to_double(ptr %ptr) {
 ; CHECK-NOLSE-LABEL: bitcast_to_double:
 ; CHECK-NOLSE:       ; %bb.0:
 ; CHECK-NOLSE-NEXT:    ldar x8, [x0]
@@ -2871,12 +2871,12 @@ define internal double @bitcast_to_double(i64* %ptr) {
 ; CHECK-LSE-O0-NEXT:    ldar x8, [x0]
 ; CHECK-LSE-O0-NEXT:    fmov d0, x8
 ; CHECK-LSE-O0-NEXT:    ret
-  %load = load atomic i64, i64* %ptr seq_cst, align 8
+  %load = load atomic i64, ptr %ptr seq_cst, align 8
   %bitcast = bitcast i64 %load to double
   ret double %bitcast
 }
 
-define internal float @bitcast_to_float(i32* %ptr) {
+define internal float @bitcast_to_float(ptr %ptr) {
 ; CHECK-NOLSE-LABEL: bitcast_to_float:
 ; CHECK-NOLSE:       ; %bb.0:
 ; CHECK-NOLSE-NEXT:    ldar w8, [x0]
@@ -2894,12 +2894,12 @@ define internal float @bitcast_to_float(i32* %ptr) {
 ; CHECK-LSE-O0-NEXT:    ldar w8, [x0]
 ; CHECK-LSE-O0-NEXT:    fmov s0, w8
 ; CHECK-LSE-O0-NEXT:    ret
-  %load = load atomic i32, i32* %ptr seq_cst, align 8
+  %load = load atomic i32, ptr %ptr seq_cst, align 8
   %bitcast = bitcast i32 %load to float
   ret float %bitcast
 }
 
-define internal half @bitcast_to_half(i16* %ptr) {
+define internal half @bitcast_to_half(ptr %ptr) {
 ; CHECK-NOLSE-LABEL: bitcast_to_half:
 ; CHECK-NOLSE:       ; %bb.0:
 ; CHECK-NOLSE-NEXT:    ldarh w8, [x0]
@@ -2920,12 +2920,12 @@ define internal half @bitcast_to_half(i16* %ptr) {
 ; CHECK-LSE-O0-NEXT:    fmov s0, w8
 ; CHECK-LSE-O0-NEXT:    ; kill: def $h0 killed $h0 killed $s0
 ; CHECK-LSE-O0-NEXT:    ret
-  %load = load atomic i16, i16* %ptr seq_cst, align 8
+  %load = load atomic i16, ptr %ptr seq_cst, align 8
   %bitcast = bitcast i16 %load to half
   ret half %bitcast
 }
 
-define internal i64* @inttoptr(i64* %ptr) {
+define internal ptr @inttoptr(ptr %ptr) {
 ; CHECK-NOLSE-LABEL: inttoptr:
 ; CHECK-NOLSE:       ; %bb.0:
 ; CHECK-NOLSE-NEXT:    ldar x0, [x0]
@@ -2940,12 +2940,12 @@ define internal i64* @inttoptr(i64* %ptr) {
 ; CHECK-LSE-O0:       ; %bb.0:
 ; CHECK-LSE-O0-NEXT:    ldar x0, [x0]
 ; CHECK-LSE-O0-NEXT:    ret
-  %load = load atomic i64, i64* %ptr seq_cst, align 8
-  %bitcast = inttoptr i64 %load to i64*
-  ret i64* %bitcast
+  %load = load atomic i64, ptr %ptr seq_cst, align 8
+  %bitcast = inttoptr i64 %load to ptr
+  ret ptr %bitcast
 }
 
-define internal i64* @load_ptr(i64** %ptr) {
+define internal ptr @load_ptr(ptr %ptr) {
 ; CHECK-NOLSE-LABEL: load_ptr:
 ; CHECK-NOLSE:       ; %bb.0:
 ; CHECK-NOLSE-NEXT:    ldar x0, [x0]
@@ -2960,8 +2960,8 @@ define internal i64* @load_ptr(i64** %ptr) {
 ; CHECK-LSE-O0:       ; %bb.0:
 ; CHECK-LSE-O0-NEXT:    ldar x0, [x0]
 ; CHECK-LSE-O0-NEXT:    ret
-  %load = load atomic i64*, i64** %ptr seq_cst, align 8
-  ret i64* %load
+  %load = load atomic ptr, ptr %ptr seq_cst, align 8
+  ret ptr %load
 }
 
 attributes #0 = { nounwind }

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-callingconv.ll b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-callingconv.ll
index dc9370cf17ac..375b7f8134c3 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-callingconv.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-callingconv.ll
@@ -43,7 +43,7 @@ define i64 @args_i64(i64 %x0, i64 %x1, i64 %x2, i64 %x3,
 }
 
 
-define i8* @args_ptrs(i8* %x0, i16* %x1, <2 x i8>* %x2, {i8, i16, i32}* %x3,
+define ptr @args_ptrs(ptr %x0, ptr %x1, ptr %x2, ptr %x3,
   ; CHECK-LABEL: name: args_ptrs
   ; CHECK: bb.1 (%ir-block.0):
   ; CHECK-NEXT:   liveins: $x0, $x1, $x2, $x3, $x4, $x5, $x6, $x7
@@ -58,8 +58,8 @@ define i8* @args_ptrs(i8* %x0, i16* %x1, <2 x i8>* %x2, {i8, i16, i32}* %x3,
   ; CHECK-NEXT:   [[COPY7:%[0-9]+]]:_(p0) = COPY $x7
   ; CHECK-NEXT:   $x0 = COPY [[COPY]](p0)
   ; CHECK-NEXT:   RET_ReallyLR implicit $x0
-                      [3 x float]* %x4, double* %x5, i8* %x6, i8* %x7) {
-  ret i8* %x0
+                      ptr %x4, ptr %x5, ptr %x6, ptr %x7) {
+  ret ptr %x0
 }
 
 define [1 x double] @args_arr([1 x double] %d0) {
@@ -134,7 +134,7 @@ define void @test_stack_ext_needed() {
 }
 
 ; Check that we can lower incoming i128 types into constituent s64 gprs.
-define void @callee_s128(i128 %a, i128 %b, i128 *%ptr) {
+define void @callee_s128(i128 %a, i128 %b, ptr %ptr) {
   ; CHECK-LABEL: name: callee_s128
   ; CHECK: bb.1 (%ir-block.0):
   ; CHECK-NEXT:   liveins: $x0, $x1, $x2, $x3, $x4
@@ -148,12 +148,12 @@ define void @callee_s128(i128 %a, i128 %b, i128 *%ptr) {
   ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:_(p0) = COPY $x4
   ; CHECK-NEXT:   G_STORE [[MV1]](s128), [[COPY4]](p0) :: (store (s128) into %ir.ptr)
   ; CHECK-NEXT:   RET_ReallyLR
-  store i128 %b, i128 *%ptr
+  store i128 %b, ptr %ptr
   ret void
 }
 
 ; Check we can lower outgoing s128 arguments into s64 gprs.
-define void @caller_s128(i128 *%ptr) {
+define void @caller_s128(ptr %ptr) {
   ; CHECK-LABEL: name: caller_s128
   ; CHECK: bb.1 (%ir-block.0):
   ; CHECK-NEXT:   liveins: $x0
@@ -171,8 +171,8 @@ define void @caller_s128(i128 *%ptr) {
   ; CHECK-NEXT:   BL @callee_s128, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $x0, implicit $x1, implicit $x2, implicit $x3, implicit $x4
   ; CHECK-NEXT:   ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
   ; CHECK-NEXT:   RET_ReallyLR
-  %v = load i128, i128 *%ptr
-  call void @callee_s128(i128 %v, i128 %v, i128 *%ptr)
+  %v = load i128, ptr %ptr
+  call void @callee_s128(i128 %v, i128 %v, ptr %ptr)
   ret void
 }
 
@@ -237,9 +237,9 @@ define void @arg_v2i64(<2 x i64> %arg) {
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $q0
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p0) = G_IMPLICIT_DEF
-  ; CHECK-NEXT:   G_STORE [[COPY]](<2 x s64>), [[DEF]](p0) :: (store (<2 x s64>) into `<2 x i64>* undef`)
+  ; CHECK-NEXT:   G_STORE [[COPY]](<2 x s64>), [[DEF]](p0) :: (store (<2 x s64>) into `ptr undef`)
   ; CHECK-NEXT:   RET_ReallyLR
-  store <2 x i64> %arg, <2 x i64>* undef
+  store <2 x i64> %arg, ptr undef
   ret void
 }
 
@@ -254,9 +254,9 @@ define void @arg_v8i64(<8 x i64> %arg) {
   ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:_(<2 x s64>) = COPY $q3
   ; CHECK-NEXT:   [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s64>) = G_CONCAT_VECTORS [[COPY]](<2 x s64>), [[COPY1]](<2 x s64>), [[COPY2]](<2 x s64>), [[COPY3]](<2 x s64>)
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p0) = G_IMPLICIT_DEF
-  ; CHECK-NEXT:   G_STORE [[CONCAT_VECTORS]](<8 x s64>), [[DEF]](p0) :: (store (<8 x s64>) into `<8 x i64>* undef`)
+  ; CHECK-NEXT:   G_STORE [[CONCAT_VECTORS]](<8 x s64>), [[DEF]](p0) :: (store (<8 x s64>) into `ptr undef`)
   ; CHECK-NEXT:   RET_ReallyLR
-  store <8 x i64> %arg, <8 x i64>* undef
+  store <8 x i64> %arg, ptr undef
   ret void
 }
 
@@ -268,9 +268,9 @@ define void @arg_v4f32(<4 x float> %arg) {
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $q0
   ; CHECK-NEXT:   [[BITCAST:%[0-9]+]]:_(<4 x s32>) = G_BITCAST [[COPY]](<2 x s64>)
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p0) = G_IMPLICIT_DEF
-  ; CHECK-NEXT:   G_STORE [[BITCAST]](<4 x s32>), [[DEF]](p0) :: (store (<4 x s32>) into `<4 x float>* undef`)
+  ; CHECK-NEXT:   G_STORE [[BITCAST]](<4 x s32>), [[DEF]](p0) :: (store (<4 x s32>) into `ptr undef`)
   ; CHECK-NEXT:   RET_ReallyLR
-  store <4 x float> %arg, <4 x float>* undef
+  store <4 x float> %arg, ptr undef
   ret void
 }
 
@@ -289,8 +289,8 @@ define void @ret_arg_v16f32(<16 x float> %arg) {
   ; CHECK-NEXT:   [[BITCAST3:%[0-9]+]]:_(<4 x s32>) = G_BITCAST [[COPY3]](<2 x s64>)
   ; CHECK-NEXT:   [[CONCAT_VECTORS:%[0-9]+]]:_(<16 x s32>) = G_CONCAT_VECTORS [[BITCAST]](<4 x s32>), [[BITCAST1]](<4 x s32>), [[BITCAST2]](<4 x s32>), [[BITCAST3]](<4 x s32>)
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p0) = G_IMPLICIT_DEF
-  ; CHECK-NEXT:   G_STORE [[CONCAT_VECTORS]](<16 x s32>), [[DEF]](p0) :: (store (<16 x s32>) into `<16 x float>* undef`)
+  ; CHECK-NEXT:   G_STORE [[CONCAT_VECTORS]](<16 x s32>), [[DEF]](p0) :: (store (<16 x s32>) into `ptr undef`)
   ; CHECK-NEXT:   RET_ReallyLR
-  store <16 x float> %arg, <16 x float>* undef
+  store <16 x float> %arg, ptr undef
   ret void
 }

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll
index 136c7e5c759c..fdde869224c1 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll
@@ -24,7 +24,7 @@ define void @test_write_register_intrin() {
   ret void
 }
 
-@_ZTIi = external global i8*
+@_ZTIi = external global ptr
 declare i32 @__gxx_personality_v0(...)
 
 ; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: %2:_(<2 x p0>) = G_INSERT_VECTOR_ELT %0:_, %{{[0-9]+}}:_(p0), %{{[0-9]+}}:_(s32) (in function: vector_of_pointers_insertelement)
@@ -34,20 +34,20 @@ define void @vector_of_pointers_insertelement() {
   br label %end
 
 block:
-  %dummy = insertelement <2 x i16*> %vec, i16* null, i32 0
-  store <2 x i16*> %dummy, <2 x i16*>* undef
+  %dummy = insertelement <2 x ptr> %vec, ptr null, i32 0
+  store <2 x ptr> %dummy, ptr undef
   ret void
 
 end:
-  %vec = load <2 x i16*>, <2 x i16*>* undef
+  %vec = load <2 x ptr>, ptr undef
   br label %block
 }
 
 ; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: cannot select: RET_ReallyLR implicit $x0 (in function: strict_align_feature)
 ; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for strict_align_feature
 ; FALLBACK-WITH-REPORT-OUT-LABEL: strict_align_feature
-define i64 @strict_align_feature(i64* %p) #0 {
-  %x = load i64, i64* %p, align 1
+define i64 @strict_align_feature(ptr %p) #0 {
+  %x = load i64, ptr %p, align 1
   ret i64 %x
 }
 
@@ -64,24 +64,24 @@ entry:
 
 ; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to lower function{{.*}}scalable_arg
 ; FALLBACK-WITH-REPORT-OUT-LABEL: scalable_arg
-define <vscale x 16 x i8> @scalable_arg(<vscale x 16 x i1> %pred, i8* %addr) #1 {
-  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1.nxv16i8(<vscale x 16 x i1> %pred, i8* %addr)
+define <vscale x 16 x i8> @scalable_arg(<vscale x 16 x i1> %pred, ptr %addr) #1 {
+  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1.nxv16i8(<vscale x 16 x i1> %pred, ptr %addr)
   ret <vscale x 16 x i8> %res
 }
 
 ; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to lower function{{.*}}scalable_ret
 ; FALLBACK-WITH-REPORT-OUT-LABEL: scalable_ret
-define <vscale x 16 x i8> @scalable_ret(i8* %addr) #1 {
+define <vscale x 16 x i8> @scalable_ret(ptr %addr) #1 {
   %pred = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 0)
-  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1.nxv16i8(<vscale x 16 x i1> %pred, i8* %addr)
+  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1.nxv16i8(<vscale x 16 x i1> %pred, ptr %addr)
   ret <vscale x 16 x i8> %res
 }
 
 ; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to translate instruction{{.*}}scalable_call
 ; FALLBACK-WITH-REPORT-OUT-LABEL: scalable_call
-define i8 @scalable_call(i8* %addr) #1 {
+define i8 @scalable_call(ptr %addr) #1 {
   %pred = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 0)
-  %vec = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1.nxv16i8(<vscale x 16 x i1> %pred, i8* %addr)
+  %vec = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1.nxv16i8(<vscale x 16 x i1> %pred, ptr %addr)
   %res = extractelement <vscale x 16 x i8> %vec, i32 0
   ret i8 %res
 }
@@ -90,7 +90,7 @@ define i8 @scalable_call(i8* %addr) #1 {
 ; FALLBACK-WITH-REPORT-OUT-LABEL: scalable_alloca
 define void @scalable_alloca() #1 {
   %local0 = alloca <vscale x 16 x i8>
-  load volatile <vscale x 16 x i8>, <vscale x 16 x i8>* %local0
+  load volatile <vscale x 16 x i8>, ptr %local0
   ret void
 }
 
@@ -98,9 +98,9 @@ define void @scalable_alloca() #1 {
 ; FALLBACK-WITH-REPORT-OUT-LABEL: asm_indirect_output
 define void @asm_indirect_output() {
 entry:
-  %ap = alloca i8*, align 8
-  %0 = load i8*, i8** %ap, align 8
-  call void asm sideeffect "", "=*r|m,0,~{memory}"(i8** elementtype(i8*) %ap, i8* %0)
+  %ap = alloca ptr, align 8
+  %0 = load ptr, ptr %ap, align 8
+  call void asm sideeffect "", "=*r|m,0,~{memory}"(ptr elementtype(ptr) %ap, ptr %0)
   ret void
 }
 
@@ -109,22 +109,20 @@ entry:
 ; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to translate instruction:{{.*}}ld64b{{.*}}asm_output_ls64
 ; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for asm_output_ls64
 ; FALLBACK-WITH-REPORT-OUT-LABEL: asm_output_ls64
-define void @asm_output_ls64(%struct.foo* %output, i8* %addr) #2 {
+define void @asm_output_ls64(ptr %output, ptr %addr) #2 {
 entry:
-  %val = call i512 asm sideeffect "ld64b $0,[$1]", "=r,r,~{memory}"(i8* %addr)
-  %outcast = bitcast %struct.foo* %output to i512*
-  store i512 %val, i512* %outcast, align 8
+  %val = call i512 asm sideeffect "ld64b $0,[$1]", "=r,r,~{memory}"(ptr %addr)
+  store i512 %val, ptr %output, align 8
   ret void
 }
 
 ; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to translate instruction:{{.*}}st64b{{.*}}asm_input_ls64
 ; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for asm_input_ls64
 ; FALLBACK-WITH-REPORT-OUT-LABEL: asm_input_ls64
-define void @asm_input_ls64(%struct.foo* %input, i8* %addr) #2 {
+define void @asm_input_ls64(ptr %input, ptr %addr) #2 {
 entry:
-  %incast = bitcast %struct.foo* %input to i512*
-  %val = load i512, i512* %incast, align 8
-  call void asm sideeffect "st64b $0,[$1]", "r,r,~{memory}"(i512 %val, i8* %addr)
+  %val = load i512, ptr %input, align 8
+  call void asm sideeffect "st64b $0,[$1]", "r,r,~{memory}"(i512 %val, ptr %addr)
   ret void
 }
 
@@ -132,12 +130,12 @@ entry:
 ; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for umul_s128
 ; FALLBACK-WITH-REPORT-OUT-LABEL: umul_s128
 declare {i128, i1} @llvm.umul.with.overflow.i128(i128, i128) nounwind readnone
-define zeroext i1 @umul_s128(i128 %v1, i128* %res) {
+define zeroext i1 @umul_s128(i128 %v1, ptr %res) {
 entry:
   %t = call {i128, i1} @llvm.umul.with.overflow.i128(i128 %v1, i128 2)
   %val = extractvalue {i128, i1} %t, 0
   %obit = extractvalue {i128, i1} %t, 1
-  store i128 %val, i128* %res
+  store i128 %val, ptr %res
   ret i1 %obit
 }
 
@@ -145,13 +143,13 @@ entry:
 ; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for gc_intr
 ; FALLBACK-WITH-REPORT-OUT-LABEL: gc_intr
 
-declare token @llvm.experimental.gc.statepoint.p0(i64 immarg, i32 immarg, i32()*, i32 immarg, i32 immarg, ...)
+declare token @llvm.experimental.gc.statepoint.p0(i64 immarg, i32 immarg, ptr, i32 immarg, i32 immarg, ...)
 declare i32 @llvm.experimental.gc.result(token)
 
 declare i32 @extern_returning_i32()
 
 define i32 @gc_intr() gc "statepoint-example" {
-   %statepoint_token = call token (i64, i32, i32()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 2882400000, i32 0, i32()* elementtype(i32 ()) @extern_returning_i32, i32 0, i32 0, i32 0, i32 0) [ "deopt"() ]
+   %statepoint_token = call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 2882400000, i32 0, ptr elementtype(i32 ()) @extern_returning_i32, i32 0, i32 0, i32 0, i32 0) [ "deopt"() ]
    %ret = call i32 (token) @llvm.experimental.gc.result(token %statepoint_token)
    ret i32 %ret
 }
@@ -160,4 +158,4 @@ attributes #1 = { "target-features"="+sve" }
 attributes #2 = { "target-features"="+ls64" }
 
 declare <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 %pattern)
-declare <vscale x 16 x i8> @llvm.aarch64.sve.ld1.nxv16i8(<vscale x 16 x i1>, i8*)
+declare <vscale x 16 x i8> @llvm.aarch64.sve.ld1.nxv16i8(<vscale x 16 x i1>, ptr)
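
The asm_output_ls64 and asm_input_ls64 hunks show the second recurring change: a bitcast between pointer types in the same address space was always a no-op at the value level, and with a single ptr type it can no longer even be written, so the casts fold away and the load or store names the accessed type directly. A hedged sketch with made-up names:

  %struct.box = type { i64, i64 }

  ; typed pointers: the access type had to match the pointer's pointee type
  define i64 @head_typed(%struct.box* %b) {
    %cast = bitcast %struct.box* %b to i64*   ; free at run time
    %v = load i64, i64* %cast
    ret i64 %v
  }

  ; opaque pointers: the load carries the accessed type itself
  define i64 @head_opaque(ptr %b) {
    %v = load i64, ptr %b
    ret i64 %v
  }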

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator-gep.ll b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator-gep.ll
index 1466bd23ac5f..a0e5d8b3b2d1 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator-gep.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator-gep.ll
@@ -4,7 +4,7 @@
 target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
 target triple = "aarch64--"
 
-define i32 @cse_gep([4 x i32]* %ptr, i32 %idx) {
+define i32 @cse_gep(ptr %ptr, i32 %idx) {
   ; O0-LABEL: name: cse_gep
   ; O0: bb.1 (%ir-block.0):
   ; O0-NEXT:   liveins: $w1, $x0
@@ -44,10 +44,10 @@ define i32 @cse_gep([4 x i32]* %ptr, i32 %idx) {
   ; O3-NEXT:   $w0 = COPY [[ADD]](s32)
   ; O3-NEXT:   RET_ReallyLR implicit $w0
   %sidx = sext i32 %idx to i64
-  %gep1 = getelementptr inbounds [4 x i32], [4 x i32]* %ptr, i64 %sidx, i64 0
-  %v1 = load i32, i32* %gep1
-  %gep2 = getelementptr inbounds [4 x i32], [4 x i32]* %ptr, i64 %sidx, i64 1
-  %v2 = load i32, i32* %gep2
+  %gep1 = getelementptr inbounds [4 x i32], ptr %ptr, i64 %sidx, i64 0
+  %v1 = load i32, ptr %gep1
+  %gep2 = getelementptr inbounds [4 x i32], ptr %ptr, i64 %sidx, i64 1
+  %v2 = load i32, ptr %gep2
   %res = add i32 %v1, %v2
   ret i32 %res
 }
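
Note that getelementptr keeps its source element type as an explicit argument even when the pointer operand is opaque, which is why the offset arithmetic in cse_gep is unchanged. A small illustrative sketch:

  ; the [4 x i32] source type, not the pointer, drives the index scaling
  define ptr @second_elem(ptr %p, i64 %i) {
    %q = getelementptr inbounds [4 x i32], ptr %p, i64 %i, i64 1
    ret ptr %q
  }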

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator-stackprotect.ll b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator-stackprotect.ll
index 6708b8c7ecb0..4e709ec684dc 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator-stackprotect.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator-stackprotect.ll
@@ -13,11 +13,11 @@
 ; CHECK: [[GUARD_SLOT:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.StackGuardSlot
 ; CHECK: [[GUARD:%[0-9]+]]:gpr64sp(p0) = LOAD_STACK_GUARD :: (dereferenceable invariant load (p0) from @__stack_chk_guard)
 ; CHECK: G_STORE [[GUARD]](p0), [[GUARD_SLOT]](p0) :: (volatile store (p0) into %stack.0.StackGuardSlot)
-declare void @llvm.stackprotector(i8*, i8**)
+declare void @llvm.stackprotector(ptr, ptr)
 define void @test_stack_guard_remat2() {
-  %StackGuardSlot = alloca i8*
-  call void @llvm.stackprotector(i8* undef, i8** %StackGuardSlot)
+  %StackGuardSlot = alloca ptr
+  call void @llvm.stackprotector(ptr undef, ptr %StackGuardSlot)
   ret void
 }
 
-@__stack_chk_guard = external global i64*
+@__stack_chk_guard = external global ptr
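
Globals whose value is itself a pointer follow the same rule: the declared type becomes ptr, and a load from such a global now states on the load that it produces a ptr. Sketch with a hypothetical global:

  @guard.example = external global ptr

  define ptr @read_guard() {
    %v = load ptr, ptr @guard.example   ; loaded type lives on the load
    ret ptr %v
  }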

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator-switch.ll b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator-switch.ll
index 17457c063734..6dd1533475f8 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator-switch.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator-switch.ll
@@ -228,12 +228,12 @@ return:
   ret i32 %retval.0
 }
 
-%0 = type { i32, i32* }
-%1 = type { i32*, i32, i32 }
+%0 = type { i32, ptr }
+%1 = type { ptr, i32, i32 }
 
 @global = external hidden constant [55 x %0], align 8
 
-define void @jt_multiple_jump_tables(%1* %arg, i32 %arg1, i32* %arg2) {
+define void @jt_multiple_jump_tables(ptr %arg, i32 %arg1, ptr %arg2) {
   ; CHECK-LABEL: name: jt_multiple_jump_tables
   ; CHECK: bb.1.bb:
   ; CHECK-NEXT:   liveins: $w1, $x0, $x2
@@ -816,17 +816,16 @@ define void @jt_multiple_jump_tables(%1* %arg, i32 %arg1, i32* %arg2) {
   ; CHECK-NEXT:   G_BR %bb.59
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.58.bb64:
-  ; CHECK-NEXT:   [[COPY5:%[0-9]+]]:_(p0) = COPY [[FRAME_INDEX]](p0)
   ; CHECK-NEXT:   ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
   ; CHECK-NEXT:   $w0 = COPY [[COPY1]](s32)
-  ; CHECK-NEXT:   $x1 = COPY [[COPY5]](p0)
+  ; CHECK-NEXT:   $x1 = COPY [[FRAME_INDEX]](p0)
   ; CHECK-NEXT:   BL @baz, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $w0, implicit $x1
   ; CHECK-NEXT:   ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
   ; CHECK-NEXT:   ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
   ; CHECK-NEXT:   $x0 = COPY [[COPY]](p0)
-  ; CHECK-NEXT:   $x1 = COPY [[COPY5]](p0)
+  ; CHECK-NEXT:   $x1 = COPY [[FRAME_INDEX]](p0)
   ; CHECK-NEXT:   BL @wibble, csr_aarch64_aapcs_thisreturn, implicit-def $lr, implicit $sp, implicit $x0, implicit $x1
-  ; CHECK-NEXT:   [[COPY6:%[0-9]+]]:_(p0) = COPY [[COPY]](p0)
+  ; CHECK-NEXT:   [[COPY5:%[0-9]+]]:_(p0) = COPY [[COPY]](p0)
   ; CHECK-NEXT:   ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
   ; CHECK-NEXT:   G_BR %bb.59
   ; CHECK-NEXT: {{  $}}
@@ -1061,33 +1060,31 @@ bb56:                                             ; preds = %bb
 
 bb57:                                             ; preds = %bb56, %bb55, %bb54, %bb53, %bb52, %bb51, %bb50, %bb49, %bb48, %bb47, %bb46, %bb45, %bb44, %bb43, %bb42, %bb41, %bb40, %bb39, %bb38, %bb37, %bb36, %bb35, %bb34, %bb33, %bb32, %bb31, %bb30, %bb29, %bb28, %bb27, %bb26, %bb25, %bb24, %bb23, %bb22, %bb21, %bb20, %bb19, %bb18, %bb17, %bb16, %bb15, %bb14, %bb13, %bb12, %bb11, %bb10, %bb9, %bb8, %bb7, %bb6, %bb5, %bb4, %bb3, %bb
   %tmp58 = phi i64 [ 0, %bb ], [ 1, %bb3 ], [ 2, %bb4 ], [ 3, %bb5 ], [ 4, %bb6 ], [ 5, %bb7 ], [ 6, %bb8 ], [ 7, %bb9 ], [ 8, %bb10 ], [ 9, %bb11 ], [ 10, %bb12 ], [ 11, %bb13 ], [ 12, %bb14 ], [ 13, %bb15 ], [ 14, %bb16 ], [ 15, %bb17 ], [ 16, %bb18 ], [ 17, %bb19 ], [ 18, %bb20 ], [ 19, %bb21 ], [ 20, %bb22 ], [ 21, %bb23 ], [ 22, %bb24 ], [ 23, %bb25 ], [ 24, %bb26 ], [ 25, %bb27 ], [ 26, %bb28 ], [ 27, %bb29 ], [ 28, %bb30 ], [ 29, %bb31 ], [ 30, %bb32 ], [ 31, %bb33 ], [ 32, %bb34 ], [ 33, %bb35 ], [ 34, %bb36 ], [ 35, %bb37 ], [ 36, %bb38 ], [ 37, %bb39 ], [ 38, %bb40 ], [ 39, %bb41 ], [ 40, %bb42 ], [ 41, %bb43 ], [ 42, %bb44 ], [ 43, %bb45 ], [ 44, %bb46 ], [ 45, %bb47 ], [ 46, %bb48 ], [ 47, %bb49 ], [ 48, %bb50 ], [ 49, %bb51 ], [ 50, %bb52 ], [ 51, %bb53 ], [ 52, %bb54 ], [ 53, %bb55 ], [ 54, %bb56 ]
-  %tmp59 = getelementptr inbounds [55 x %0], [55 x %0]* @global, i64 0, i64 %tmp58, i32 1
-  %tmp60 = load i32*, i32** %tmp59, align 8
-  %tmp61 = call %1* @wibble(%1* %arg, i32* %tmp60)
+  %tmp59 = getelementptr inbounds [55 x %0], ptr @global, i64 0, i64 %tmp58, i32 1
+  %tmp60 = load ptr, ptr %tmp59, align 8
+  %tmp61 = call ptr @wibble(ptr %arg, ptr %tmp60)
   br label %bb68
 
 bb62:                                             ; preds = %bb69
-  %tmp63 = call %1* @wibble(%1* %arg, i32* nonnull %arg2)
+  %tmp63 = call ptr @wibble(ptr %arg, ptr nonnull %arg2)
   br label %bb68
 
 bb64:                                             ; preds = %bb69
-  %tmp65 = bitcast [16 x i32]* %tmp to i8*
-  %tmp66 = getelementptr inbounds [16 x i32], [16 x i32]* %tmp, i64 0, i64 0
-  call void @baz(i32 %arg1, i32* %tmp66)
-  %tmp67 = call %1* @wibble(%1* %arg, i32*  %tmp66)
+  call void @baz(i32 %arg1, ptr %tmp)
+  %tmp67 = call ptr @wibble(ptr %arg, ptr  %tmp)
   br label %bb68
 
 bb68:                                             ; preds = %bb64, %bb62, %bb57
   ret void
 
 bb69:                                             ; preds = %bb
-  %tmp70 = icmp eq i32* %arg2, null
+  %tmp70 = icmp eq ptr %arg2, null
   br i1 %tmp70, label %bb64, label %bb62
 }
 
-declare %1* @wibble(%1* returned, i32*)
+declare ptr @wibble(ptr returned, ptr)
 
-declare void @baz(i32, i32*)
+declare void @baz(i32, ptr)
 
 
 ; Check that with 2 jump tables, the phi node doesn't lose the edge from the
@@ -1120,7 +1117,7 @@ define void @jt_2_tables_phi_edge_from_second() {
   ; CHECK-NEXT:   [[C19:%[0-9]+]]:_(s32) = G_CONSTANT i32 9
   ; CHECK-NEXT:   [[C20:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
   ; CHECK-NEXT:   [[C21:%[0-9]+]]:_(s32) = G_CONSTANT i32 15
-  ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load (s32) from `i32* undef`, align 8)
+  ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load (s32) from `ptr undef`, align 8)
   ; CHECK-NEXT:   [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[LOAD]](s32), [[C]]
   ; CHECK-NEXT:   G_BRCOND [[ICMP]](s1), %bb.6
   ; CHECK-NEXT:   G_BR %bb.19
@@ -1249,7 +1246,7 @@ define void @jt_2_tables_phi_edge_from_second() {
   ; CHECK-NEXT:   [[PHI1:%[0-9]+]]:_(s32) = G_PHI [[C21]](s32), %bb.30, [[PHI]](s32), %bb.16
   ; CHECK-NEXT:   RET_ReallyLR
 entry:
-  %0 = load i32, i32* undef, align 8
+  %0 = load i32, ptr undef, align 8
   switch i32 %0, label %sw.default.i49 [
     i32 270, label %if.then
     i32 265, label %sw.bb14.i48
@@ -1379,7 +1376,7 @@ return:
   ret i32 %retval.0
 }
 
-define i64* @test_range_phi_switch_cycle() {
+define ptr @test_range_phi_switch_cycle() {
   ; CHECK-LABEL: name: test_range_phi_switch_cycle
   ; CHECK: bb.1.bb:
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
@@ -1437,11 +1434,11 @@ bb3:                                              ; preds = %bb1
   br label %bb1
 
 bb4:                                              ; preds = %bb1
-  %tmp5 = tail call i64* @ham(i32 %tmp)
+  %tmp5 = tail call ptr @ham(i32 %tmp)
   unreachable
 }
 
-declare i64* @ham(i32)
+declare ptr @ham(i32)
 
 define internal void @bar() unnamed_addr #1 {
   ; CHECK-LABEL: name: bar
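
In jt_multiple_jump_tables above, both the dead bitcast and the zero-index getelementptr into %tmp disappear: a GEP of the form (gep [16 x i32], %tmp, 0, 0) addresses the first element, which under opaque pointers is the base pointer value itself, so callers can take %tmp directly. Sketch with a hypothetical callee:

  declare void @use(ptr)

  define void @pass_first_elem() {
    %tmp = alloca [16 x i32]
    ; no array-decay GEP needed: the first element's address is %tmp
    call void @use(ptr %tmp)
    ret void
  }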

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll
index b28f076f72cf..5f3544add398 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll
@@ -118,8 +118,8 @@ end:
 ; CHECK-NEXT: RET_ReallyLR
 ; CHECK: [[FALSE]].{{[a-zA-Z0-9.]+}}:
 ; CHECK-NEXT: RET_ReallyLR
-define void @condbr(i1* %tstaddr) {
-  %tst = load i1, i1* %tstaddr
+define void @condbr(ptr %tstaddr) {
+  %tst = load i1, ptr %tstaddr
   br i1 %tst, label %true, label %false
 true:
   ret void
@@ -146,7 +146,7 @@ false:
 ; CHECK: [[BB_L2]].{{[a-zA-Z0-9.]+}} (ir-block-address-taken %ir-block.{{[a-zA-Z0-9.]+}}):
 ; CHECK-NEXT: RET_ReallyLR
 
-@indirectbr.L = internal unnamed_addr constant [3 x i8*] [i8* blockaddress(@indirectbr, %L1), i8* blockaddress(@indirectbr, %L2), i8* null], align 8
+@indirectbr.L = internal unnamed_addr constant [3 x ptr] [ptr blockaddress(@indirectbr, %L1), ptr blockaddress(@indirectbr, %L2), ptr null], align 8
 
 define void @indirectbr() {
 entry:
@@ -155,9 +155,9 @@ L1:                                               ; preds = %entry, %L1
   %i = phi i32 [ 0, %entry ], [ %inc, %L1 ]
   %inc = add i32 %i, 1
   %idxprom = zext i32 %i to i64
-  %arrayidx = getelementptr inbounds [3 x i8*], [3 x i8*]* @indirectbr.L, i64 0, i64 %idxprom
-  %brtarget = load i8*, i8** %arrayidx, align 8
-  indirectbr i8* %brtarget, [label %L1, label %L2]
+  %arrayidx = getelementptr inbounds [3 x ptr], ptr @indirectbr.L, i64 0, i64 %idxprom
+  %brtarget = load ptr, ptr %arrayidx, align 8
+  indirectbr ptr %brtarget, [label %L1, label %L2]
 L2:                                               ; preds = %L1
   ret void
 }
@@ -259,8 +259,8 @@ define i32 @subi32(i32 %arg1, i32 %arg2) {
 ; CHECK: [[RES:%[0-9]+]]:_(s64) = G_PTRTOINT [[ARG1]]
 ; CHECK: $x0 = COPY [[RES]]
 ; CHECK: RET_ReallyLR implicit $x0
-define i64 @ptrtoint(i64* %a) {
-  %val = ptrtoint i64* %a to i64
+define i64 @ptrtoint(ptr %a) {
+  %val = ptrtoint ptr %a to i64
   ret i64 %val
 }
 
@@ -269,18 +269,17 @@ define i64 @ptrtoint(i64* %a) {
 ; CHECK: [[RES:%[0-9]+]]:_(p0) = G_INTTOPTR [[ARG1]]
 ; CHECK: $x0 = COPY [[RES]]
 ; CHECK: RET_ReallyLR implicit $x0
-define i64* @inttoptr(i64 %a) {
-  %val = inttoptr i64 %a to i64*
-  ret i64* %val
+define ptr @inttoptr(i64 %a) {
+  %val = inttoptr i64 %a to ptr
+  ret ptr %val
 }
 
 ; CHECK-LABEL: name: trivial_bitcast
 ; CHECK: [[ARG1:%[0-9]+]]:_(p0) = COPY $x0
 ; CHECK: $x0 = COPY [[ARG1]]
 ; CHECK: RET_ReallyLR implicit $x0
-define i64* @trivial_bitcast(i8* %a) {
-  %val = bitcast i8* %a to i64*
-  ret i64* %val
+define ptr @trivial_bitcast(ptr %a) {
+  ret ptr %a
 }
 
 ; CHECK-LABEL: name: trivial_bitcast_with_copy
@@ -321,10 +320,10 @@ define i64 @bitcast(i64 %a) {
 ; CHECK: [[RES2:%[0-9]+]]:_(p0) = G_ADDRSPACE_CAST [[RES1]]
 ; CHECK: $x0 = COPY [[RES2]]
 ; CHECK: RET_ReallyLR implicit $x0
-define i64* @addrspacecast(i32 addrspace(1)* %a) {
-  %res1 = addrspacecast i32 addrspace(1)* %a to i64 addrspace(2)*
-  %res2 = addrspacecast i64 addrspace(2)* %res1 to i64*
-  ret i64* %res2
+define ptr @addrspacecast(ptr addrspace(1) %a) {
+  %res1 = addrspacecast ptr addrspace(1) %a to ptr addrspace(2)
+  %res2 = addrspacecast ptr addrspace(2) %res1 to ptr
+  ret ptr %res2
 }
 
 ; CHECK-LABEL: name: trunc
@@ -334,7 +333,7 @@ define i64* @addrspacecast(i32 addrspace(1)* %a) {
 ; CHECK: [[RES2:%[0-9]+]]:_(<4 x s16>) = G_TRUNC [[VEC]]
 define void @trunc(i64 %a) {
   %vecptr = alloca <4 x i32>
-  %vec = load <4 x i32>, <4 x i32>* %vecptr
+  %vec = load <4 x i32>, ptr %vecptr
   %res1 = trunc i64 %a to i8
   %res2 = trunc <4 x i32> %vec to <4 x i16>
   ret void
@@ -352,16 +351,16 @@ define void @trunc(i64 %a) {
 ; CHECK: [[SUM4:%[0-9]+]]:_(s64) = G_ADD [[SUM3]], [[VAL4]]
 ; CHECK: $x0 = COPY [[SUM4]]
 ; CHECK: RET_ReallyLR implicit $x0
-define i64 @load(i64* %addr, i64 addrspace(42)* %addr42) {
-  %val1 = load i64, i64* %addr, align 16
+define i64 @load(ptr %addr, ptr addrspace(42) %addr42) {
+  %val1 = load i64, ptr %addr, align 16
 
-  %val2 = load i64, i64 addrspace(42)* %addr42
+  %val2 = load i64, ptr addrspace(42) %addr42
   %sum2 = add i64 %val1, %val2
 
-  %val3 = load volatile i64, i64* %addr
+  %val3 = load volatile i64, ptr %addr
   %sum3 = add i64 %sum2, %val3
 
-  %val4 = load i64, i64* %addr, !range !0
+  %val4 = load i64, ptr %addr, !range !0
   %sum4 = add i64 %sum3, %val4
   ret i64 %sum4
 }
@@ -375,10 +374,10 @@ define i64 @load(i64* %addr, i64 addrspace(42)* %addr42) {
 ; CHECK: G_STORE [[VAL2]](s64), [[ADDR42]](p42) :: (store (s64) into %ir.addr42, addrspace 42)
 ; CHECK: G_STORE [[VAL1]](s64), [[ADDR]](p0) :: (volatile store (s64) into %ir.addr)
 ; CHECK: RET_ReallyLR
-define void @store(i64* %addr, i64 addrspace(42)* %addr42, i64 %val1, i64 %val2) {
-  store i64 %val1, i64* %addr, align 16
-  store i64 %val2, i64 addrspace(42)* %addr42
-  store volatile i64 %val1, i64* %addr
+define void @store(ptr %addr, ptr addrspace(42) %addr42, i64 %val1, i64 %val2) {
+  store i64 %val1, ptr %addr, align 16
+  store i64 %val2, ptr addrspace(42) %addr42
+  store volatile i64 %val1, ptr %addr
   %sum = add i64 %val1, %val2
   ret void
 }
@@ -391,14 +390,14 @@ define void @store(i64* %addr, i64 addrspace(42)* %addr42, i64 %val1, i64 %val2)
 ; CHECK: [[VEC:%[0-9]+]]:_(<8 x s8>) = G_LOAD [[PTR_VEC]]
 ; CHECK: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.aarch64.neon.st2), [[VEC]](<8 x s8>), [[VEC]](<8 x s8>), [[PTR]](p0)
 ; CHECK: RET_ReallyLR
-declare i8* @llvm.returnaddress(i32)
-declare void @llvm.aarch64.neon.st2.v8i8.p0i8(<8 x i8>, <8 x i8>, i8*)
-declare { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2.v8i8.p0v8i8(<8 x i8>*)
+declare ptr @llvm.returnaddress(i32)
+declare void @llvm.aarch64.neon.st2.v8i8.p0(<8 x i8>, <8 x i8>, ptr)
+declare { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2.v8i8.p0(ptr)
 define void @intrinsics(i32 %cur, i32 %bits) {
-  %ptr = call i8* @llvm.returnaddress(i32 0)
+  %ptr = call ptr @llvm.returnaddress(i32 0)
   %ptr.vec = alloca <8 x i8>
-  %vec = load <8 x i8>, <8 x i8>* %ptr.vec
-  call void @llvm.aarch64.neon.st2.v8i8.p0i8(<8 x i8> %vec, <8 x i8> %vec, i8* %ptr)
+  %vec = load <8 x i8>, ptr %ptr.vec
+  call void @llvm.aarch64.neon.st2.v8i8.p0(<8 x i8> %vec, <8 x i8> %vec, ptr %ptr)
   ret void
 }
 
@@ -414,15 +413,15 @@ define void @intrinsics(i32 %cur, i32 %bits) {
 
 ; CHECK:     [[RES:%[0-9]+]]:_(s32) = G_PHI [[RES1]](s32), %[[TRUE]], [[RES2]](s32), %[[FALSE]]
 ; CHECK:     $w0 = COPY [[RES]]
-define i32 @test_phi(i32* %addr1, i32* %addr2, i1 %tst) {
+define i32 @test_phi(ptr %addr1, ptr %addr2, i1 %tst) {
   br i1 %tst, label %true, label %false
 
 true:
-  %res1 = load i32, i32* %addr1
+  %res1 = load i32, ptr %addr1
   br label %end
 
 false:
-  %res2 = load i32, i32* %addr2
+  %res2 = load i32, ptr %addr2
   br label %end
 
 end:
@@ -481,8 +480,8 @@ define i32 @test_undef() {
 ; CHECK: [[ONE:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
 ; CHECK: [[PTR:%[0-9]+]]:_(p0) = G_INTTOPTR [[ONE]]
 ; CHECK: $x0 = COPY [[PTR]]
-define i8* @test_constant_inttoptr() {
-  ret i8* inttoptr(i64 1 to i8*)
+define ptr @test_constant_inttoptr() {
+  ret ptr inttoptr(i64 1 to ptr)
 }
 
   ; This failed purely because the Constant -> VReg map was kept across
@@ -593,8 +592,8 @@ define i32 @test_urem(i32 %arg1, i32 %arg2) {
 ; CHECK-LABEL: name: test_constant_null
 ; CHECK: [[NULL:%[0-9]+]]:_(p0) = G_CONSTANT i64 0
 ; CHECK: $x0 = COPY [[NULL]]
-define i8* @test_constant_null() {
-  ret i8* null
+define ptr @test_constant_null() {
+  ret ptr null
 }
 
 ; CHECK-LABEL: name: test_struct_memops
@@ -606,9 +605,9 @@ define i8* @test_constant_null() {
 ; CHECK: G_STORE [[VAL1]](s8), [[ADDR]](p0) :: (store (s8) into %ir.addr, align 4)
 ; CHECK: [[GEP2:%[0-9]+]]:_(p0) = G_PTR_ADD [[ADDR]], [[CST1]](s64)
 ; CHECK: G_STORE [[VAL2]](s32), [[GEP2]](p0) :: (store (s32) into %ir.addr + 4)
-define void @test_struct_memops({ i8, i32 }* %addr) {
-  %val = load { i8, i32 }, { i8, i32 }* %addr
-  store { i8, i32 } %val, { i8, i32 }* %addr
+define void @test_struct_memops(ptr %addr) {
+  %val = load { i8, i32 }, ptr %addr
+  store { i8, i32 } %val, ptr %addr
   ret void
 }
 
@@ -616,9 +615,9 @@ define void @test_struct_memops({ i8, i32 }* %addr) {
 ; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
 ; CHECK: [[VAL:%[0-9]+]]:_(s1) = G_LOAD [[ADDR]](p0) :: (load (s1) from  %ir.addr)
 ; CHECK: G_STORE [[VAL]](s1), [[ADDR]](p0) :: (store (s1) into  %ir.addr)
-define void @test_i1_memops(i1* %addr) {
-  %val = load i1, i1* %addr
-  store i1 %val, i1* %addr
+define void @test_i1_memops(ptr %addr) {
+  %val = load i1, ptr %addr
+  store i1 %val, ptr %addr
   ret void
 }
 
@@ -628,9 +627,9 @@ define void @test_i1_memops(i1* %addr) {
 ; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x2
 ; CHECK: [[TST:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[LHS]](s32), [[RHS]]
 ; CHECK: G_STORE [[TST]](s1), [[ADDR]](p0)
-define void @int_comparison(i32 %a, i32 %b, i1* %addr) {
+define void @int_comparison(i32 %a, i32 %b, ptr %addr) {
   %res = icmp ne i32 %a, %b
-  store i1 %res, i1* %addr
+  store i1 %res, ptr %addr
   ret void
 }
 
@@ -640,9 +639,9 @@ define void @int_comparison(i32 %a, i32 %b, i1* %addr) {
 ; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x2
 ; CHECK: [[TST:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[LHS]](p0), [[RHS]]
 ; CHECK: G_STORE [[TST]](s1), [[ADDR]](p0)
-define void @ptr_comparison(i8* %a, i8* %b, i1* %addr) {
-  %res = icmp eq i8* %a, %b
-  store i1 %res, i1* %addr
+define void @ptr_comparison(ptr %a, ptr %b, ptr %addr) {
+  %res = icmp eq ptr %a, %b
+  store i1 %res, ptr %addr
   ret void
 }
 
@@ -711,9 +710,9 @@ define float @test_frem(float %arg1, float %arg2) {
 ; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_PTR_ADD [[ADDR]], [[CST]](s64)
 ; CHECK: G_STORE [[OVERFLOW]](s1), [[GEP]](p0) :: (store (s1) into %ir.addr + 4, align 4)
 declare { i32, i1 } @llvm.sadd.with.overflow.i32(i32, i32)
-define void @test_sadd_overflow(i32 %lhs, i32 %rhs, { i32, i1 }* %addr) {
+define void @test_sadd_overflow(i32 %lhs, i32 %rhs, ptr %addr) {
   %res = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %lhs, i32 %rhs)
-  store { i32, i1 } %res, { i32, i1 }* %addr
+  store { i32, i1 } %res, ptr %addr
   ret void
 }
 
@@ -727,9 +726,9 @@ define void @test_sadd_overflow(i32 %lhs, i32 %rhs, { i32, i1 }* %addr) {
 ; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_PTR_ADD [[ADDR]], [[CST]](s64)
 ; CHECK: G_STORE [[OVERFLOW]](s1), [[GEP]](p0) :: (store (s1) into %ir.addr + 4, align 4)
 declare { i32, i1 } @llvm.uadd.with.overflow.i32(i32, i32)
-define void @test_uadd_overflow(i32 %lhs, i32 %rhs, { i32, i1 }* %addr) {
+define void @test_uadd_overflow(i32 %lhs, i32 %rhs, ptr %addr) {
   %res = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %lhs, i32 %rhs)
-  store { i32, i1 } %res, { i32, i1 }* %addr
+  store { i32, i1 } %res, ptr %addr
   ret void
 }
 
@@ -743,9 +742,9 @@ define void @test_uadd_overflow(i32 %lhs, i32 %rhs, { i32, i1 }* %addr) {
 ; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_PTR_ADD [[ADDR]], [[CST]](s64)
 ; CHECK: G_STORE [[OVERFLOW]](s1), [[GEP]](p0) :: (store (s1) into %ir.subr + 4, align 4)
 declare { i32, i1 } @llvm.ssub.with.overflow.i32(i32, i32)
-define void @test_ssub_overflow(i32 %lhs, i32 %rhs, { i32, i1 }* %subr) {
+define void @test_ssub_overflow(i32 %lhs, i32 %rhs, ptr %subr) {
   %res = call { i32, i1 } @llvm.ssub.with.overflow.i32(i32 %lhs, i32 %rhs)
-  store { i32, i1 } %res, { i32, i1 }* %subr
+  store { i32, i1 } %res, ptr %subr
   ret void
 }
 
@@ -759,9 +758,9 @@ define void @test_ssub_overflow(i32 %lhs, i32 %rhs, { i32, i1 }* %subr) {
 ; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_PTR_ADD [[ADDR]], [[CST]](s64)
 ; CHECK: G_STORE [[OVERFLOW]](s1), [[GEP]](p0) :: (store (s1) into %ir.subr + 4, align 4)
 declare { i32, i1 } @llvm.usub.with.overflow.i32(i32, i32)
-define void @test_usub_overflow(i32 %lhs, i32 %rhs, { i32, i1 }* %subr) {
+define void @test_usub_overflow(i32 %lhs, i32 %rhs, ptr %subr) {
   %res = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %lhs, i32 %rhs)
-  store { i32, i1 } %res, { i32, i1 }* %subr
+  store { i32, i1 } %res, ptr %subr
   ret void
 }
 
@@ -775,9 +774,9 @@ define void @test_usub_overflow(i32 %lhs, i32 %rhs, { i32, i1 }* %subr) {
 ; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_PTR_ADD [[ADDR]], [[CST]](s64)
 ; CHECK: G_STORE [[OVERFLOW]](s1), [[GEP]](p0) :: (store (s1) into %ir.addr + 4, align 4)
 declare { i32, i1 } @llvm.smul.with.overflow.i32(i32, i32)
-define void @test_smul_overflow(i32 %lhs, i32 %rhs, { i32, i1 }* %addr) {
+define void @test_smul_overflow(i32 %lhs, i32 %rhs, ptr %addr) {
   %res = call { i32, i1 } @llvm.smul.with.overflow.i32(i32 %lhs, i32 %rhs)
-  store { i32, i1 } %res, { i32, i1 }* %addr
+  store { i32, i1 } %res, ptr %addr
   ret void
 }
 
@@ -791,9 +790,9 @@ define void @test_smul_overflow(i32 %lhs, i32 %rhs, { i32, i1 }* %addr) {
 ; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_PTR_ADD [[ADDR]], [[CST]](s64)
 ; CHECK: G_STORE [[OVERFLOW]](s1), [[GEP]](p0) :: (store (s1) into %ir.addr + 4, align 4)
 declare { i32, i1 } @llvm.umul.with.overflow.i32(i32, i32)
-define void @test_umul_overflow(i32 %lhs, i32 %rhs, { i32, i1 }* %addr) {
+define void @test_umul_overflow(i32 %lhs, i32 %rhs, ptr %addr) {
   %res = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %lhs, i32 %rhs)
-  store { i32, i1 } %res, { i32, i1 }* %addr
+  store { i32, i1 } %res, ptr %addr
   ret void
 }
 
@@ -811,8 +810,8 @@ define void @test_umul_overflow(i32 %lhs, i32 %rhs, { i32, i1 }* %addr) {
 ; CHECK: [[LD4:%[0-9]+]]:_(s32) = G_LOAD [[GEP3]](p0) :: (load (s32) from %ir.addr + 12)
 ; CHECK: $w0 = COPY [[LD3]](s32)
 %struct.nested = type {i8, { i8, i32 }, i32}
-define i32 @test_extractvalue(%struct.nested* %addr) {
-  %struct = load %struct.nested, %struct.nested* %addr
+define i32 @test_extractvalue(ptr %addr) {
+  %struct = load %struct.nested, ptr %addr
   %res = extractvalue %struct.nested %struct, 1, 1
   ret i32 %res
 }
@@ -833,10 +832,10 @@ define i32 @test_extractvalue(%struct.nested* %addr) {
 ; CHECK: G_STORE [[LD2]](s8), %1(p0) :: (store (s8) into %ir.addr2, align 4)
 ; CHECK: [[GEP4:%[0-9]+]]:_(p0) = G_PTR_ADD %1, [[CST1]](s64)
 ; CHECK: G_STORE [[LD3]](s32), [[GEP4]](p0) :: (store (s32) into %ir.addr2 + 4)
-define void @test_extractvalue_agg(%struct.nested* %addr, {i8, i32}* %addr2) {
-  %struct = load %struct.nested, %struct.nested* %addr
+define void @test_extractvalue_agg(ptr %addr, ptr %addr2) {
+  %struct = load %struct.nested, ptr %addr
   %res = extractvalue %struct.nested %struct, 1
-  store {i8, i32} %res, {i8, i32}* %addr2
+  store {i8, i32} %res, ptr %addr2
   ret void
 }
 
@@ -845,9 +844,9 @@ define void @test_extractvalue_agg(%struct.nested* %addr, {i8, i32}* %addr2) {
 ; CHECK: [[VAL32:%[0-9]+]]:_(s32) = COPY $w1
 ; CHECK: [[VAL:%[0-9]+]]:_(s8) = G_TRUNC [[VAL32]]
 ; CHECK: G_STORE [[VAL]](s8), [[STRUCT]](p0)
-define void @test_trivial_extract_ptr([1 x i8*] %s, i8 %val) {
-  %addr = extractvalue [1 x i8*] %s, 0
-  store i8 %val, i8* %addr
+define void @test_trivial_extract_ptr([1 x ptr] %s, i8 %val) {
+  %addr = extractvalue [1 x ptr] %s, 0
+  store i8 %val, ptr %addr
   ret void
 }
 
@@ -871,10 +870,10 @@ define void @test_trivial_extract_ptr([1 x i8*] %s, i8 %val) {
 ; CHECK: G_STORE %1(s32), [[GEP5]](p0) :: (store (s32) into %ir.addr + 8)
 ; CHECK: [[GEP6:%[0-9]+]]:_(p0) = G_PTR_ADD %0, [[CST3]](s64)
 ; CHECK: G_STORE [[LD4]](s32), [[GEP6]](p0) :: (store (s32) into %ir.addr + 12)
-define void @test_insertvalue(%struct.nested* %addr, i32 %val) {
-  %struct = load %struct.nested, %struct.nested* %addr
+define void @test_insertvalue(ptr %addr, i32 %val) {
+  %struct = load %struct.nested, ptr %addr
   %newstruct = insertvalue %struct.nested %struct, i32 %val, 1, 1
-  store %struct.nested %newstruct, %struct.nested* %addr
+  store %struct.nested %newstruct, ptr %addr
   ret void
 }
 
@@ -887,13 +886,13 @@ define [1 x i64] @test_trivial_insert([1 x i64] %s, i64 %val) {
   ret [1 x i64] %res
 }
 
-define [1 x i8*] @test_trivial_insert_ptr([1 x i8*] %s, i8* %val) {
+define [1 x ptr] @test_trivial_insert_ptr([1 x ptr] %s, ptr %val) {
 ; CHECK-LABEL: name: test_trivial_insert_ptr
 ; CHECK: [[STRUCT:%[0-9]+]]:_(p0) = COPY $x0
 ; CHECK: [[VAL:%[0-9]+]]:_(p0) = COPY $x1
 ; CHECK: $x0 = COPY [[VAL]]
-  %res = insertvalue [1 x i8*] %s, i8* %val, 0
-  ret [1 x i8*] %res
+  %res = insertvalue [1 x ptr] %s, ptr %val, 0
+  ret [1 x ptr] %res
 }
 
 ; CHECK-LABEL: name: test_insertvalue_agg
@@ -919,11 +918,11 @@ define [1 x i8*] @test_trivial_insert_ptr([1 x i8*] %s, i8* %val) {
 ; CHECK: G_STORE [[LD2]](s32), [[GEP6]](p0) :: (store (s32) into %ir.addr + 8)
 ; CHECK: [[GEP7:%[0-9]+]]:_(p0) = G_PTR_ADD %0, [[CST4]](s64)
 ; CHECK: G_STORE [[LD6]](s32), [[GEP7]](p0) :: (store (s32) into %ir.addr + 12)
-define void @test_insertvalue_agg(%struct.nested* %addr, {i8, i32}* %addr2) {
-  %smallstruct = load {i8, i32}, {i8, i32}* %addr2
-  %struct = load %struct.nested, %struct.nested* %addr
+define void @test_insertvalue_agg(ptr %addr, ptr %addr2) {
+  %smallstruct = load {i8, i32}, ptr %addr2
+  %struct = load %struct.nested, ptr %addr
   %res = insertvalue %struct.nested %struct, {i8, i32} %smallstruct, 1
-  store %struct.nested %res, %struct.nested* %addr
+  store %struct.nested %res, ptr %addr
   ret void
 }
 
@@ -977,9 +976,9 @@ define float @test_select_cmp_flags(float %cmp0, float %cmp1, float %lhs, float
 ; CHECK: [[TST:%[0-9]+]]:_(s1) = G_TRUNC [[TSTASSERT]]
 ; CHECK: [[RES:%[0-9]+]]:_(p0) = G_SELECT [[TST]](s1), [[LHS]], [[RHS]]
 ; CHECK: $x0 = COPY [[RES]]
-define i8* @test_select_ptr(i1 %tst, i8* %lhs, i8* %rhs) {
-  %res = select i1 %tst, i8* %lhs, i8* %rhs
-  ret i8* %res
+define ptr @test_select_ptr(i1 %tst, ptr %lhs, ptr %rhs) {
+  %res = select i1 %tst, ptr %lhs, ptr %rhs
+  ret ptr %res
 }
 
 ; CHECK-LABEL: name: test_select_vec
@@ -1014,8 +1013,8 @@ define <4 x i32> @test_vselect_vec(<4 x i32> %tst32, <4 x i32> %lhs, <4 x i32> %
 ; CHECK: [[FP:%[0-9]+]]:_(s32) = G_LOAD [[FPADDR]](p0)
 ; CHECK: [[RES:%[0-9]+]]:_(s64) = G_FPTOSI [[FP]](s32)
 ; CHECK: $x0 = COPY [[RES]]
-define i64 @test_fptosi(float* %fp.addr) {
-  %fp = load float, float* %fp.addr
+define i64 @test_fptosi(ptr %fp.addr) {
+  %fp = load float, ptr %fp.addr
   %res = fptosi float %fp to i64
   ret i64 %res
 }
@@ -1025,8 +1024,8 @@ define i64 @test_fptosi(float* %fp.addr) {
 ; CHECK: [[FP:%[0-9]+]]:_(s32) = G_LOAD [[FPADDR]](p0)
 ; CHECK: [[RES:%[0-9]+]]:_(s64) = G_FPTOUI [[FP]](s32)
 ; CHECK: $x0 = COPY [[RES]]
-define i64 @test_fptoui(float* %fp.addr) {
-  %fp = load float, float* %fp.addr
+define i64 @test_fptoui(ptr %fp.addr) {
+  %fp = load float, ptr %fp.addr
   %res = fptoui float %fp to i64
   ret i64 %res
 }
@@ -1036,9 +1035,9 @@ define i64 @test_fptoui(float* %fp.addr) {
 ; CHECK: [[IN:%[0-9]+]]:_(s32) = COPY $w1
 ; CHECK: [[FP:%[0-9]+]]:_(s64) = G_SITOFP [[IN]](s32)
 ; CHECK: G_STORE [[FP]](s64), [[ADDR]](p0)
-define void @test_sitofp(double* %addr, i32 %in) {
+define void @test_sitofp(ptr %addr, i32 %in) {
   %fp = sitofp i32 %in to double
-  store double %fp, double* %addr
+  store double %fp, ptr %addr
   ret void
 }
 
@@ -1047,9 +1046,9 @@ define void @test_sitofp(double* %addr, i32 %in) {
 ; CHECK: [[IN:%[0-9]+]]:_(s32) = COPY $w1
 ; CHECK: [[FP:%[0-9]+]]:_(s64) = G_UITOFP [[IN]](s32)
 ; CHECK: G_STORE [[FP]](s64), [[ADDR]](p0)
-define void @test_uitofp(double* %addr, i32 %in) {
+define void @test_uitofp(ptr %addr, i32 %in) {
   %fp = uitofp i32 %in to double
-  store double %fp, double* %addr
+  store double %fp, ptr %addr
   ret void
 }
 
@@ -1075,8 +1074,8 @@ define float @test_fptrunc(double %in) {
 ; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
 ; CHECK: [[TMP:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.500000e+00
 ; CHECK: G_STORE [[TMP]](s32), [[ADDR]](p0)
-define void @test_constant_float(float* %addr) {
-  store float 1.5, float* %addr
+define void @test_constant_float(ptr %addr) {
+  store float 1.5, ptr %addr
   ret void
 }
 
@@ -1088,11 +1087,11 @@ define void @test_constant_float(float* %addr) {
 ; CHECK: [[RHS:%[0-9]+]]:_(s32) = G_LOAD [[RHSADDR]](p0)
 ; CHECK: [[TST:%[0-9]+]]:_(s1) = nnan ninf nsz arcp contract afn reassoc G_FCMP floatpred(oge), [[LHS]](s32), [[RHS]]
 ; CHECK: G_STORE [[TST]](s1), [[BOOLADDR]](p0)
-define void @float_comparison(float* %a.addr, float* %b.addr, i1* %bool.addr) {
-  %a = load float, float* %a.addr
-  %b = load float, float* %b.addr
+define void @float_comparison(ptr %a.addr, ptr %b.addr, ptr %bool.addr) {
+  %a = load float, ptr %a.addr
+  %b = load float, ptr %b.addr
   %res = fcmp nnan ninf nsz arcp contract afn reassoc oge float %a, %b
-  store i1 %res, i1* %bool.addr
+  store i1 %res, ptr %bool.addr
   ret void
 }
 
@@ -1111,93 +1110,93 @@ define i1 @trivial_float_comparison(double %a, double %b) {
 
 @var = global i32 0
 
-define i32* @test_global() {
+define ptr @test_global() {
 ; CHECK-LABEL: name: test_global
 ; CHECK: [[TMP:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @var{{$}}
 ; CHECK: $x0 = COPY [[TMP]](p0)
 
-  ret i32* @var
+  ret ptr @var
 }
 
 @var1 = addrspace(42) global i32 0
-define i32 addrspace(42)* @test_global_addrspace() {
+define ptr addrspace(42) @test_global_addrspace() {
 ; CHECK-LABEL: name: test_global
 ; CHECK: [[TMP:%[0-9]+]]:_(p42) = G_GLOBAL_VALUE @var1{{$}}
 ; CHECK: $x0 = COPY [[TMP]](p42)
 
-  ret i32 addrspace(42)* @var1
+  ret ptr addrspace(42) @var1
 }
 
 
-define void()* @test_global_func() {
+define ptr @test_global_func() {
 ; CHECK-LABEL: name: test_global_func
 ; CHECK: [[TMP:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @allocai64{{$}}
 ; CHECK: $x0 = COPY [[TMP]](p0)
 
-  ret void()* @allocai64
+  ret ptr @allocai64
 }
 
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8*, i8*, i64, i1)
-define void @test_memcpy(i8* %dst, i8* %src, i64 %size) {
+declare void @llvm.memcpy.p0.p0.i64(ptr, ptr, i64, i1)
+define void @test_memcpy(ptr %dst, ptr %src, i64 %size) {
 ; CHECK-LABEL: name: test_memcpy
 ; CHECK: [[DST:%[0-9]+]]:_(p0) = COPY $x0
 ; CHECK: [[SRC:%[0-9]+]]:_(p0) = COPY $x1
 ; CHECK: [[SIZE:%[0-9]+]]:_(s64) = COPY $x2
 ; CHECK: G_MEMCPY [[DST]](p0), [[SRC]](p0), [[SIZE]](s64), 0 :: (store (s8) into %ir.dst), (load (s8) from %ir.src)
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %size, i1 0)
+  call void @llvm.memcpy.p0.p0.i64(ptr %dst, ptr %src, i64 %size, i1 0)
   ret void
 }
 
-define void @test_memcpy_tail(i8* %dst, i8* %src, i64 %size) {
+define void @test_memcpy_tail(ptr %dst, ptr %src, i64 %size) {
 ; CHECK-LABEL: name: test_memcpy_tail
 ; CHECK: [[DST:%[0-9]+]]:_(p0) = COPY $x0
 ; CHECK: [[SRC:%[0-9]+]]:_(p0) = COPY $x1
 ; CHECK: [[SIZE:%[0-9]+]]:_(s64) = COPY $x2
 ; CHECK: G_MEMCPY [[DST]](p0), [[SRC]](p0), [[SIZE]](s64), 1 :: (store (s8) into %ir.dst), (load (s8) from %ir.src)
-  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %size, i1 0)
+  tail call void @llvm.memcpy.p0.p0.i64(ptr %dst, ptr %src, i64 %size, i1 0)
   ret void
 }
 
-declare void @llvm.memcpy.p1i8.p1i8.i64(i8 addrspace(1)*, i8 addrspace(1)*, i64, i1)
-define void @test_memcpy_nonzero_as(i8 addrspace(1)* %dst, i8 addrspace(1) * %src, i64 %size) {
+declare void @llvm.memcpy.p1.p1.i64(ptr addrspace(1), ptr addrspace(1), i64, i1)
+define void @test_memcpy_nonzero_as(ptr addrspace(1) %dst, ptr addrspace(1) %src, i64 %size) {
 ; CHECK-LABEL: name: test_memcpy_nonzero_as
 ; CHECK: [[DST:%[0-9]+]]:_(p1) = COPY $x0
 ; CHECK: [[SRC:%[0-9]+]]:_(p1) = COPY $x1
 ; CHECK: [[SIZE:%[0-9]+]]:_(s64) = COPY $x2
 ; CHECK: G_MEMCPY [[DST]](p1), [[SRC]](p1), [[SIZE]](s64), 0 :: (store (s8) into %ir.dst, addrspace 1), (load (s8) from %ir.src, addrspace 1)
-  call void @llvm.memcpy.p1i8.p1i8.i64(i8 addrspace(1)* %dst, i8 addrspace(1)* %src, i64 %size, i1 0)
+  call void @llvm.memcpy.p1.p1.i64(ptr addrspace(1) %dst, ptr addrspace(1) %src, i64 %size, i1 0)
   ret void
 }
 
-declare void @llvm.memmove.p0i8.p0i8.i64(i8*, i8*, i64, i1)
-define void @test_memmove(i8* %dst, i8* %src, i64 %size) {
+declare void @llvm.memmove.p0.p0.i64(ptr, ptr, i64, i1)
+define void @test_memmove(ptr %dst, ptr %src, i64 %size) {
 ; CHECK-LABEL: name: test_memmove
 ; CHECK: [[DST:%[0-9]+]]:_(p0) = COPY $x0
 ; CHECK: [[SRC:%[0-9]+]]:_(p0) = COPY $x1
 ; CHECK: [[SIZE:%[0-9]+]]:_(s64) = COPY $x2
 ; CHECK: G_MEMMOVE [[DST]](p0), [[SRC]](p0), [[SIZE]](s64), 0 :: (store (s8) into %ir.dst), (load (s8) from %ir.src)
-  call void @llvm.memmove.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %size, i1 0)
+  call void @llvm.memmove.p0.p0.i64(ptr %dst, ptr %src, i64 %size, i1 0)
   ret void
 }
 
-declare void @llvm.memset.p0i8.i64(i8*, i8, i64, i1)
-define void @test_memset(i8* %dst, i8 %val, i64 %size) {
+declare void @llvm.memset.p0.i64(ptr, i8, i64, i1)
+define void @test_memset(ptr %dst, i8 %val, i64 %size) {
 ; CHECK-LABEL: name: test_memset
 ; CHECK: [[DST:%[0-9]+]]:_(p0) = COPY $x0
 ; CHECK: [[SRC_C:%[0-9]+]]:_(s32) = COPY $w1
 ; CHECK: [[SRC:%[0-9]+]]:_(s8) = G_TRUNC [[SRC_C]]
 ; CHECK: [[SIZE:%[0-9]+]]:_(s64) = COPY $x2
 ; CHECK: G_MEMSET [[DST]](p0), [[SRC]](s8), [[SIZE]](s64), 0 :: (store (s8) into %ir.dst)
-  call void @llvm.memset.p0i8.i64(i8* %dst, i8 %val, i64 %size, i1 0)
+  call void @llvm.memset.p0.i64(ptr %dst, i8 %val, i64 %size, i1 0)
   ret void
 }
 
-define void @test_large_const(i128* %addr) {
+define void @test_large_const(ptr %addr) {
 ; CHECK-LABEL: name: test_large_const
 ; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
 ; CHECK: [[VAL:%[0-9]+]]:_(s128) = G_CONSTANT i128 42
 ; CHECK: G_STORE [[VAL]](s128), [[ADDR]](p0)
-  store i128 42, i128* %addr
+  store i128 42, ptr %addr
   ret void
 }
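
; Note on the intrinsic renames in this hunk: overloaded pointer parameters
; now mangle by address space alone, since there is no pointee type left to
; encode, e.g.
;   llvm.memcpy.p0i8.p0i8.i64 -> llvm.memcpy.p0.p0.i64
;   llvm.memcpy.p1i8.p1i8.i64 -> llvm.memcpy.p1.p1.i64
;   llvm.memset.p0i8.i64      -> llvm.memset.p0.i64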
 
@@ -1205,7 +1204,7 @@ define void @test_large_const(i128* %addr) {
 ; to insert the constants at the end of the block, even if they were encountered
 ; after the block's terminators had been emitted. Also make sure the order is
 ; correct.
-define i8* @test_const_placement() {
+define ptr @test_const_placement() {
 ; CHECK-LABEL: name: test_const_placement
 ; CHECK: bb.{{[0-9]+}} (%ir-block.{{[0-9]+}}):
 ; CHECK:   [[VAL_INT:%[0-9]+]]:_(s32) = G_CONSTANT i32 42
@@ -1214,29 +1213,29 @@ define i8* @test_const_placement() {
   br label %next
 
 next:
-  ret i8* inttoptr(i32 42 to i8*)
+  ret ptr inttoptr(i32 42 to ptr)
 }
 
-declare void @llvm.va_end(i8*)
-define void @test_va_end(i8* %list) {
+declare void @llvm.va_end(ptr)
+define void @test_va_end(ptr %list) {
 ; CHECK-LABEL: name: test_va_end
 ; CHECK-NOT: va_end
 ; CHECK-NOT: INTRINSIC
 ; CHECK: RET_ReallyLR
-  call void @llvm.va_end(i8* %list)
+  call void @llvm.va_end(ptr %list)
   ret void
 }
 
-define void @test_va_arg(i8* %list) {
+define void @test_va_arg(ptr %list) {
 ; CHECK-LABEL: test_va_arg
 ; CHECK: [[LIST:%[0-9]+]]:_(p0) = COPY $x0
 ; CHECK: G_VAARG [[LIST]](p0), 8
 ; CHECK: G_VAARG [[LIST]](p0), 1
 ; CHECK: G_VAARG [[LIST]](p0), 16
 
-  %v0 = va_arg i8* %list, i64
-  %v1 = va_arg i8* %list, i8
-  %v2 = va_arg i8* %list, i128
+  %v0 = va_arg ptr %list, i64
+  %v1 = va_arg ptr %list, i8
+  %v2 = va_arg ptr %list, i128
   ret void
 }
 
@@ -1450,8 +1449,8 @@ define i32 @test_fshr_intrinsic(i32 %a, i32 %b, i32 %c) {
   ret i32 %res
 }
 
-declare void @llvm.lifetime.start.p0i8(i64, i8*)
-declare void @llvm.lifetime.end.p0i8(i64, i8*)
+declare void @llvm.lifetime.start.p0(i64, ptr)
+declare void @llvm.lifetime.end.p0(i64, ptr)
 define void @test_lifetime_intrin() {
 ; CHECK-LABEL: name: test_lifetime_intrin
 ; CHECK: RET_ReallyLR
@@ -1462,13 +1461,13 @@ define void @test_lifetime_intrin() {
 ; O3-NEXT: LIFETIME_END %stack.0.slot
 ; O3-NEXT: RET_ReallyLR
   %slot = alloca i8, i32 4
-  call void @llvm.lifetime.start.p0i8(i64 0, i8* %slot)
-  store volatile i8 10, i8* %slot
-  call void @llvm.lifetime.end.p0i8(i64 0, i8* %slot)
+  call void @llvm.lifetime.start.p0(i64 0, ptr %slot)
+  store volatile i8 10, ptr %slot
+  call void @llvm.lifetime.end.p0(i64 0, ptr %slot)
   ret void
 }
 
-define void @test_load_store_atomics(i8* %addr) {
+define void @test_load_store_atomics(ptr %addr) {
 ; CHECK-LABEL: name: test_load_store_atomics
 ; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
 ; CHECK: [[V0:%[0-9]+]]:_(s8) = G_LOAD [[ADDR]](p0) :: (load unordered (s8) from %ir.addr)
@@ -1477,14 +1476,14 @@ define void @test_load_store_atomics(i8* %addr) {
 ; CHECK: G_STORE [[V1]](s8), [[ADDR]](p0) :: (store release (s8) into %ir.addr)
 ; CHECK: [[V2:%[0-9]+]]:_(s8) = G_LOAD [[ADDR]](p0) :: (load syncscope("singlethread") seq_cst (s8) from %ir.addr)
 ; CHECK: G_STORE [[V2]](s8), [[ADDR]](p0) :: (store syncscope("singlethread") monotonic (s8) into %ir.addr)
-  %v0 = load atomic i8, i8* %addr unordered, align 1
-  store atomic i8 %v0, i8* %addr monotonic, align 1
+  %v0 = load atomic i8, ptr %addr unordered, align 1
+  store atomic i8 %v0, ptr %addr monotonic, align 1
 
-  %v1 = load atomic i8, i8* %addr acquire, align 1
-  store atomic i8 %v1, i8* %addr release, align 1
+  %v1 = load atomic i8, ptr %addr acquire, align 1
+  store atomic i8 %v1, ptr %addr release, align 1
 
-  %v2 = load atomic i8, i8* %addr syncscope("singlethread") seq_cst, align 1
-  store atomic i8 %v2, i8* %addr syncscope("singlethread") monotonic, align 1
+  %v2 = load atomic i8, ptr %addr syncscope("singlethread") seq_cst, align 1
+  store atomic i8 %v2, ptr %addr syncscope("singlethread") monotonic, align 1
 
   ret void
 }
@@ -1803,28 +1802,28 @@ define <4 x half> @test_constant_vector() {
   ret <4 x half> <half undef, half undef, half undef, half 0xH3C00>
 }
 
-define i32 @test_target_mem_intrinsic(i32* %addr) {
+define i32 @test_target_mem_intrinsic(ptr %addr) {
 ; CHECK-LABEL: name: test_target_mem_intrinsic
 ; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
 ; CHECK: [[VAL:%[0-9]+]]:_(s64) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.aarch64.ldxr), [[ADDR]](p0) :: (volatile load (s32) from %ir.addr)
 ; CHECK: G_TRUNC [[VAL]](s64)
-  %val = call i64 @llvm.aarch64.ldxr.p0i32(i32* elementtype(i32) %addr)
+  %val = call i64 @llvm.aarch64.ldxr.p0(ptr elementtype(i32) %addr)
   %trunc = trunc i64 %val to i32
   ret i32 %trunc
 }
 
-declare i64 @llvm.aarch64.ldxr.p0i32(i32*) nounwind
+declare i64 @llvm.aarch64.ldxr.p0(ptr) nounwind
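
; The elementtype(i32) attribute above is how the pointee type survives the
; migration where it is still semantically required: the verifier demands
; elementtype on operands of intrinsics like @llvm.aarch64.ldxr, on indirect
; inline-asm operands (see asm_indirect_output in arm64-fallback.ll), and on
; gc.statepoint callees.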
 
 %zerosize_type = type {}
 
-define %zerosize_type @test_empty_load_store(%zerosize_type *%ptr, %zerosize_type %in) noinline optnone {
+define %zerosize_type @test_empty_load_store(ptr %ptr, %zerosize_type %in) noinline optnone {
 ; CHECK-LABEL: name: test_empty_load_store
 ; CHECK-NOT: G_STORE
 ; CHECK-NOT: G_LOAD
 ; CHECK: RET_ReallyLR
 entry:
-  store %zerosize_type undef, %zerosize_type* undef, align 4
-  %val = load %zerosize_type, %zerosize_type* %ptr, align 4
+  store %zerosize_type undef, ptr undef, align 4
+  %val = load %zerosize_type, ptr %ptr, align 4
   ret %zerosize_type %in
 }
 
@@ -1868,7 +1867,7 @@ exit:
   ret i64 %res
 }
 
-define void @test_phi_diamond({ i8, i16, i32 }* %a.ptr, { i8, i16, i32 }* %b.ptr, i1 %selector, { i8, i16, i32 }* %dst) {
+define void @test_phi_diamond(ptr %a.ptr, ptr %b.ptr, i1 %selector, ptr %dst) {
 ; CHECK-LABEL: name: test_phi_diamond
 ; CHECK: [[ARG1:%[0-9]+]]:_(p0) = COPY $x0
 ; CHECK: [[ARG2:%[0-9]+]]:_(p0) = COPY $x1
@@ -1913,16 +1912,16 @@ entry:
   br i1 %selector, label %store.a, label %store.b
 
 store.a:
-  %a = load { i8, i16, i32 }, { i8, i16, i32 }* %a.ptr
+  %a = load { i8, i16, i32 }, ptr %a.ptr
   br label %join
 
 store.b:
-  %b = load { i8, i16, i32 }, { i8, i16, i32 }* %b.ptr
+  %b = load { i8, i16, i32 }, ptr %b.ptr
   br label %join
 
 join:
   %v = phi { i8, i16, i32 } [ %a, %store.a ], [ %b, %store.b ]
-  store { i8, i16, i32 } %v, { i8, i16, i32 }* %dst
+  store { i8, i16, i32 } %v, ptr %dst
   ret void
 }
 
@@ -1930,7 +1929,7 @@ join:
 %agg.inner = type {i16, i8, %agg.inner.inner }
 %agg.nested = type {i32, i32, %agg.inner, i32}
 
-define void @test_nested_aggregate_const(%agg.nested *%ptr) {
+define void @test_nested_aggregate_const(ptr %ptr) {
 ; CHECK-LABEL: name: test_nested_aggregate_const
 ; CHECK: [[BASE:%[0-9]+]]:_(p0) = COPY $x0
 ; CHECK: [[CST1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
@@ -1958,7 +1957,7 @@ define void @test_nested_aggregate_const(%agg.nested *%ptr) {
 ; CHECK: [[CST12:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
 ; CHECK: [[GEP6:%[0-9]+]]:_(p0) = G_PTR_ADD [[BASE]], [[CST12]](s64)
 ; CHECK: G_STORE [[CST6]](s32), [[GEP6]](p0) :: (store (s32) into %ir.ptr + 32, align 8)
-  store %agg.nested { i32 1, i32 1, %agg.inner { i16 2, i8 3, %agg.inner.inner {i64 5, i64 8} }, i32 13}, %agg.nested *%ptr
+  store %agg.nested { i32 1, i32 1, %agg.inner { i16 2, i8 3, %agg.inner.inner {i64 5, i64 8} }, i32 13}, ptr %ptr
   ret void
 }
 
@@ -1974,7 +1973,7 @@ define i1 @return_i1_zext() {
 }
 
 ; Try one cmpxchg
-define i32 @test_atomic_cmpxchg_1(i32* %addr) {
+define i32 @test_atomic_cmpxchg_1(ptr %addr) {
 ; CHECK-LABEL: name: test_atomic_cmpxchg_1
 ; CHECK:       bb.1.entry:
 ; CHECK-NEXT:  successors: %bb.{{[^)]+}}
@@ -1991,7 +1990,7 @@ define i32 @test_atomic_cmpxchg_1(i32* %addr) {
 entry:
   br label %repeat
 repeat:
-  %val_success = cmpxchg i32* %addr, i32 0, i32 1 monotonic monotonic
+  %val_success = cmpxchg ptr %addr, i32 0, i32 1 monotonic monotonic
   %value_loaded = extractvalue { i32, i1 } %val_success, 0
   %success = extractvalue { i32, i1 } %val_success, 1
   br i1 %success, label %done, label %repeat
@@ -2000,7 +1999,7 @@ done:
 }
 
 ; Try one cmpxchg
-define i32 @test_weak_atomic_cmpxchg_1(i32* %addr) {
+define i32 @test_weak_atomic_cmpxchg_1(ptr %addr) {
 ; CHECK-LABEL: name: test_weak_atomic_cmpxchg_1
 ; CHECK:       bb.1.entry:
 ; CHECK-NEXT:  successors: %bb.{{[^)]+}}
@@ -2017,7 +2016,7 @@ define i32 @test_weak_atomic_cmpxchg_1(i32* %addr) {
 entry:
   br label %repeat
 repeat:
-  %val_success = cmpxchg weak i32* %addr, i32 0, i32 1 monotonic monotonic
+  %val_success = cmpxchg weak ptr %addr, i32 0, i32 1 monotonic monotonic
   %value_loaded = extractvalue { i32, i1 } %val_success, 0
   %success = extractvalue { i32, i1 } %val_success, 1
   br i1 %success, label %done, label %repeat
@@ -2026,7 +2025,7 @@ done:
 }
 
 ; Try one cmpxchg with a small type and high atomic ordering.
-define i16 @test_atomic_cmpxchg_2(i16* %addr) {
+define i16 @test_atomic_cmpxchg_2(ptr %addr) {
 ; CHECK-LABEL: name: test_atomic_cmpxchg_2
 ; CHECK:       bb.1.entry:
 ; CHECK-NEXT:  successors: %bb.2({{[^)]+}})
@@ -2043,7 +2042,7 @@ define i16 @test_atomic_cmpxchg_2(i16* %addr) {
 entry:
   br label %repeat
 repeat:
-  %val_success = cmpxchg i16* %addr, i16 0, i16 1 seq_cst seq_cst
+  %val_success = cmpxchg ptr %addr, i16 0, i16 1 seq_cst seq_cst
   %value_loaded = extractvalue { i16, i1 } %val_success, 0
   %success = extractvalue { i16, i1 } %val_success, 1
   br i1 %success, label %done, label %repeat
@@ -2052,7 +2051,7 @@ done:
 }
 
 ; Try one cmpxchg where the success order and failure order differ.
-define i64 @test_atomic_cmpxchg_3(i64* %addr) {
+define i64 @test_atomic_cmpxchg_3(ptr %addr) {
 ; CHECK-LABEL: name: test_atomic_cmpxchg_3
 ; CHECK:       bb.1.entry:
 ; CHECK-NEXT:  successors: %bb.2({{[^)]+}})
@@ -2069,7 +2068,7 @@ define i64 @test_atomic_cmpxchg_3(i64* %addr) {
 entry:
   br label %repeat
 repeat:
-  %val_success = cmpxchg i64* %addr, i64 0, i64 1 seq_cst acquire
+  %val_success = cmpxchg ptr %addr, i64 0, i64 1 seq_cst acquire
   %value_loaded = extractvalue { i64, i1 } %val_success, 0
   %success = extractvalue { i64, i1 } %val_success, 1
   br i1 %success, label %done, label %repeat
@@ -2079,7 +2078,7 @@ done:
 
 ; Try a monotonic atomicrmw xchg
 ; AArch64 will expand some atomicrmw's at the LLVM-IR level so we use a wide type to avoid this.
-define i32 @test_atomicrmw_xchg(i256* %addr) {
+define i32 @test_atomicrmw_xchg(ptr %addr) {
 ; CHECK-LABEL: name: test_atomicrmw_xchg
 ; CHECK:       bb.1 (%ir-block.{{[0-9]+}}):
 ; CHECK-NEXT:  liveins: $x0
@@ -2087,7 +2086,7 @@ define i32 @test_atomicrmw_xchg(i256* %addr) {
 ; CHECK-NEXT:    [[VAL:%[0-9]+]]:_(s256) = G_CONSTANT i256 1
 ; CHECK-NEXT:    [[OLDVALRES:%[0-9]+]]:_(s256) = G_ATOMICRMW_XCHG [[ADDR]](p0), [[VAL]] :: (load store monotonic (s256) on %ir.addr)
 ; CHECK-NEXT:    [[RES:%[0-9]+]]:_(s32) = G_TRUNC [[OLDVALRES]]
-  %oldval = atomicrmw xchg i256* %addr, i256 1 monotonic
+  %oldval = atomicrmw xchg ptr %addr, i256 1 monotonic
   ; FIXME: We currently can't lower 'ret i256' and it's not the purpose of this
   ;        test so work around it by truncating to i32 for now.
   %oldval.trunc = trunc i256 %oldval to i32
@@ -2096,7 +2095,7 @@ define i32 @test_atomicrmw_xchg(i256* %addr) {
 
 ; Try an acquire atomicrmw add
 ; AArch64 will expand some atomicrmw's at the LLVM-IR level so we use a wide type to avoid this.
-define i32 @test_atomicrmw_add(i256* %addr) {
+define i32 @test_atomicrmw_add(ptr %addr) {
 ; CHECK-LABEL: name: test_atomicrmw_add
 ; CHECK:       bb.1 (%ir-block.{{[0-9]+}}):
 ; CHECK-NEXT:  liveins: $x0
@@ -2104,7 +2103,7 @@ define i32 @test_atomicrmw_add(i256* %addr) {
 ; CHECK-NEXT:    [[VAL:%[0-9]+]]:_(s256) = G_CONSTANT i256 1
 ; CHECK-NEXT:    [[OLDVALRES:%[0-9]+]]:_(s256) = G_ATOMICRMW_ADD [[ADDR]](p0), [[VAL]] :: (load store acquire (s256) on %ir.addr)
 ; CHECK-NEXT:    [[RES:%[0-9]+]]:_(s32) = G_TRUNC [[OLDVALRES]]
-  %oldval = atomicrmw add i256* %addr, i256 1 acquire
+  %oldval = atomicrmw add ptr %addr, i256 1 acquire
   ; FIXME: We currently can't lower 'ret i256' and it's not the purpose of this
   ;        test so work around it by truncating to i32 for now.
   %oldval.trunc = trunc i256 %oldval to i32
@@ -2113,7 +2112,7 @@ define i32 @test_atomicrmw_add(i256* %addr) {
 
 ; Try a release atomicrmw sub
 ; AArch64 will expand some atomicrmw's at the LLVM-IR level so we use a wide type to avoid this.
-define i32 @test_atomicrmw_sub(i256* %addr) {
+define i32 @test_atomicrmw_sub(ptr %addr) {
 ; CHECK-LABEL: name: test_atomicrmw_sub
 ; CHECK:       bb.1 (%ir-block.{{[0-9]+}}):
 ; CHECK-NEXT:  liveins: $x0
@@ -2121,7 +2120,7 @@ define i32 @test_atomicrmw_sub(i256* %addr) {
 ; CHECK-NEXT:    [[VAL:%[0-9]+]]:_(s256) = G_CONSTANT i256 1
 ; CHECK-NEXT:    [[OLDVALRES:%[0-9]+]]:_(s256) = G_ATOMICRMW_SUB [[ADDR]](p0), [[VAL]] :: (load store release (s256) on %ir.addr)
 ; CHECK-NEXT:    [[RES:%[0-9]+]]:_(s32) = G_TRUNC [[OLDVALRES]]
-  %oldval = atomicrmw sub i256* %addr, i256 1 release
+  %oldval = atomicrmw sub ptr %addr, i256 1 release
   ; FIXME: We currently can't lower 'ret i256' and it's not the purpose of this
   ;        test so work around it by truncating to i32 for now.
   %oldval.trunc = trunc i256 %oldval to i32
@@ -2130,7 +2129,7 @@ define i32 @test_atomicrmw_sub(i256* %addr) {
 
 ; Try an acq_rel atomicrmw and
 ; AArch64 will expand some atomicrmw's at the LLVM-IR level so we use a wide type to avoid this.
-define i32 @test_atomicrmw_and(i256* %addr) {
+define i32 @test_atomicrmw_and(ptr %addr) {
 ; CHECK-LABEL: name: test_atomicrmw_and
 ; CHECK:       bb.1 (%ir-block.{{[0-9]+}}):
 ; CHECK-NEXT:  liveins: $x0
@@ -2138,7 +2137,7 @@ define i32 @test_atomicrmw_and(i256* %addr) {
 ; CHECK-NEXT:    [[VAL:%[0-9]+]]:_(s256) = G_CONSTANT i256 1
 ; CHECK-NEXT:    [[OLDVALRES:%[0-9]+]]:_(s256) = G_ATOMICRMW_AND [[ADDR]](p0), [[VAL]] :: (load store acq_rel (s256) on %ir.addr)
 ; CHECK-NEXT:    [[RES:%[0-9]+]]:_(s32) = G_TRUNC [[OLDVALRES]]
-  %oldval = atomicrmw and i256* %addr, i256 1 acq_rel
+  %oldval = atomicrmw and ptr %addr, i256 1 acq_rel
   ; FIXME: We currently can't lower 'ret i256' and it's not the purpose of this
   ;        test so work around it by truncating to i32 for now.
   %oldval.trunc = trunc i256 %oldval to i32
@@ -2147,7 +2146,7 @@ define i32 @test_atomicrmw_and(i256* %addr) {
 
 ; Try a seq_cst atomicrmw nand
 ; AArch64 will expand some atomicrmw's at the LLVM-IR level so we use a wide type to avoid this.
-define i32 @test_atomicrmw_nand(i256* %addr) {
+define i32 @test_atomicrmw_nand(ptr %addr) {
 ; CHECK-LABEL: name: test_atomicrmw_nand
 ; CHECK:       bb.1 (%ir-block.{{[0-9]+}}):
 ; CHECK-NEXT:  liveins: $x0
@@ -2155,7 +2154,7 @@ define i32 @test_atomicrmw_nand(i256* %addr) {
 ; CHECK-NEXT:    [[VAL:%[0-9]+]]:_(s256) = G_CONSTANT i256 1
 ; CHECK-NEXT:    [[OLDVALRES:%[0-9]+]]:_(s256) = G_ATOMICRMW_NAND [[ADDR]](p0), [[VAL]] :: (load store seq_cst (s256) on %ir.addr)
 ; CHECK-NEXT:    [[RES:%[0-9]+]]:_(s32) = G_TRUNC [[OLDVALRES]]
-  %oldval = atomicrmw nand i256* %addr, i256 1 seq_cst
+  %oldval = atomicrmw nand ptr %addr, i256 1 seq_cst
   ; FIXME: We currently can't lower 'ret i256' and it's not the purpose of this
   ;        test so work around it by truncating to i32 for now.
   %oldval.trunc = trunc i256 %oldval to i32
@@ -2164,7 +2163,7 @@ define i32 @test_atomicrmw_nand(i256* %addr) {
 
 ; Try a seq_cst atomicrmw or
 ; AArch64 will expand some atomicrmw's at the LLVM-IR level so we use a wide type to avoid this.
-define i32 @test_atomicrmw_or(i256* %addr) {
+define i32 @test_atomicrmw_or(ptr %addr) {
 ; CHECK-LABEL: name: test_atomicrmw_or
 ; CHECK:       bb.1 (%ir-block.{{[0-9]+}}):
 ; CHECK-NEXT:  liveins: $x0
@@ -2172,7 +2171,7 @@ define i32 @test_atomicrmw_or(i256* %addr) {
 ; CHECK-NEXT:    [[VAL:%[0-9]+]]:_(s256) = G_CONSTANT i256 1
 ; CHECK-NEXT:    [[OLDVALRES:%[0-9]+]]:_(s256) = G_ATOMICRMW_OR [[ADDR]](p0), [[VAL]] :: (load store seq_cst (s256) on %ir.addr)
 ; CHECK-NEXT:    [[RES:%[0-9]+]]:_(s32) = G_TRUNC [[OLDVALRES]]
-  %oldval = atomicrmw or i256* %addr, i256 1 seq_cst
+  %oldval = atomicrmw or ptr %addr, i256 1 seq_cst
   ; FIXME: We currently can't lower 'ret i256' and it's not the purpose of this
   ;        test so work around it by truncating to i32 for now.
   %oldval.trunc = trunc i256 %oldval to i32
@@ -2181,7 +2180,7 @@ define i32 @test_atomicrmw_or(i256* %addr) {
 
 ; Try a seq_cst atomicrmw xor
 ; AArch64 will expand some atomicrmw's at the LLVM-IR level so we use a wide type to avoid this.
-define i32 @test_atomicrmw_xor(i256* %addr) {
+define i32 @test_atomicrmw_xor(ptr %addr) {
 ; CHECK-LABEL: name: test_atomicrmw_xor
 ; CHECK:       bb.1 (%ir-block.{{[0-9]+}}):
 ; CHECK-NEXT:  liveins: $x0
@@ -2189,7 +2188,7 @@ define i32 @test_atomicrmw_xor(i256* %addr) {
 ; CHECK-NEXT:    [[VAL:%[0-9]+]]:_(s256) = G_CONSTANT i256 1
 ; CHECK-NEXT:    [[OLDVALRES:%[0-9]+]]:_(s256) = G_ATOMICRMW_XOR [[ADDR]](p0), [[VAL]] :: (load store seq_cst (s256) on %ir.addr)
 ; CHECK-NEXT:    [[RES:%[0-9]+]]:_(s32) = G_TRUNC [[OLDVALRES]]
-  %oldval = atomicrmw xor i256* %addr, i256 1 seq_cst
+  %oldval = atomicrmw xor ptr %addr, i256 1 seq_cst
   ; FIXME: We currently can't lower 'ret i256' and it's not the purpose of this
   ;        test so work around it by truncating to i32 for now.
   %oldval.trunc = trunc i256 %oldval to i32
@@ -2198,7 +2197,7 @@ define i32 @test_atomicrmw_xor(i256* %addr) {
 
 ; Try a seq_cst atomicrmw min
 ; AArch64 will expand some atomicrmw's at the LLVM-IR level so we use a wide type to avoid this.
-define i32 @test_atomicrmw_min(i256* %addr) {
+define i32 @test_atomicrmw_min(ptr %addr) {
 ; CHECK-LABEL: name: test_atomicrmw_min
 ; CHECK:       bb.1 (%ir-block.{{[0-9]+}}):
 ; CHECK-NEXT:  liveins: $x0
@@ -2206,7 +2205,7 @@ define i32 @test_atomicrmw_min(i256* %addr) {
 ; CHECK-NEXT:    [[VAL:%[0-9]+]]:_(s256) = G_CONSTANT i256 1
 ; CHECK-NEXT:    [[OLDVALRES:%[0-9]+]]:_(s256) = G_ATOMICRMW_MIN [[ADDR]](p0), [[VAL]] :: (load store seq_cst (s256) on %ir.addr)
 ; CHECK-NEXT:    [[RES:%[0-9]+]]:_(s32) = G_TRUNC [[OLDVALRES]]
-  %oldval = atomicrmw min i256* %addr, i256 1 seq_cst
+  %oldval = atomicrmw min ptr %addr, i256 1 seq_cst
   ; FIXME: We currently can't lower 'ret i256' and it's not the purpose of this
   ;        test so work around it by truncating to i32 for now.
   %oldval.trunc = trunc i256 %oldval to i32
@@ -2215,7 +2214,7 @@ define i32 @test_atomicrmw_min(i256* %addr) {
 
 ; Try a seq_cst atomicrmw max
 ; AArch64 will expand some atomicrmws at the LLVM-IR level so we use a wide type to avoid this.
-define i32 @test_atomicrmw_max(i256* %addr) {
+define i32 @test_atomicrmw_max(ptr %addr) {
 ; CHECK-LABEL: name: test_atomicrmw_max
 ; CHECK:       bb.1 (%ir-block.{{[0-9]+}}):
 ; CHECK-NEXT:  liveins: $x0
@@ -2223,7 +2222,7 @@ define i32 @test_atomicrmw_max(i256* %addr) {
 ; CHECK-NEXT:    [[VAL:%[0-9]+]]:_(s256) = G_CONSTANT i256 1
 ; CHECK-NEXT:    [[OLDVALRES:%[0-9]+]]:_(s256) = G_ATOMICRMW_MAX [[ADDR]](p0), [[VAL]] :: (load store seq_cst (s256) on %ir.addr)
 ; CHECK-NEXT:    [[RES:%[0-9]+]]:_(s32) = G_TRUNC [[OLDVALRES]]
-  %oldval = atomicrmw max i256* %addr, i256 1 seq_cst
+  %oldval = atomicrmw max ptr %addr, i256 1 seq_cst
   ; FIXME: We currently can't lower 'ret i256' and it's not the purpose of this
   ;        test so work around it by truncating to i32 for now.
   %oldval.trunc = trunc i256 %oldval to i32
@@ -2232,7 +2231,7 @@ define i32 @test_atomicrmw_max(i256* %addr) {
 
 ; Try a seq_cst atomicrmw unsigned min
 ; AArch64 will expand some atomicrmws at the LLVM-IR level so we use a wide type to avoid this.
-define i32 @test_atomicrmw_umin(i256* %addr) {
+define i32 @test_atomicrmw_umin(ptr %addr) {
 ; CHECK-LABEL: name: test_atomicrmw_umin
 ; CHECK:       bb.1 (%ir-block.{{[0-9]+}}):
 ; CHECK-NEXT:  liveins: $x0
@@ -2240,7 +2239,7 @@ define i32 @test_atomicrmw_umin(i256* %addr) {
 ; CHECK-NEXT:    [[VAL:%[0-9]+]]:_(s256) = G_CONSTANT i256 1
 ; CHECK-NEXT:    [[OLDVALRES:%[0-9]+]]:_(s256) = G_ATOMICRMW_UMIN [[ADDR]](p0), [[VAL]] :: (load store seq_cst (s256) on %ir.addr)
 ; CHECK-NEXT:    [[RES:%[0-9]+]]:_(s32) = G_TRUNC [[OLDVALRES]]
-  %oldval = atomicrmw umin i256* %addr, i256 1 seq_cst
+  %oldval = atomicrmw umin ptr %addr, i256 1 seq_cst
   ; FIXME: We currently can't lower 'ret i256' and it's not the purpose of this
   ;        test so work around it by truncating to i32 for now.
   %oldval.trunc = trunc i256 %oldval to i32
@@ -2249,7 +2248,7 @@ define i32 @test_atomicrmw_umin(i256* %addr) {
 
 ; Try a seq_cst atomicrmw unsigned max
 ; AArch64 will expand some atomicrmws at the LLVM-IR level so we use a wide type to avoid this.
-define i32 @test_atomicrmw_umax(i256* %addr) {
+define i32 @test_atomicrmw_umax(ptr %addr) {
 ; CHECK-LABEL: name: test_atomicrmw_umax
 ; CHECK:       bb.1 (%ir-block.{{[0-9]+}}):
 ; CHECK-NEXT:  liveins: $x0
@@ -2257,36 +2256,35 @@ define i32 @test_atomicrmw_umax(i256* %addr) {
 ; CHECK-NEXT:    [[VAL:%[0-9]+]]:_(s256) = G_CONSTANT i256 1
 ; CHECK-NEXT:    [[OLDVALRES:%[0-9]+]]:_(s256) = G_ATOMICRMW_UMAX [[ADDR]](p0), [[VAL]] :: (load store seq_cst (s256) on %ir.addr)
 ; CHECK-NEXT:    [[RES:%[0-9]+]]:_(s32) = G_TRUNC [[OLDVALRES]]
-  %oldval = atomicrmw umax i256* %addr, i256 1 seq_cst
+  %oldval = atomicrmw umax ptr %addr, i256 1 seq_cst
   ; FIXME: We currently can't lower 'ret i256' and it's not the purpose of this
   ;        test so work around it by truncating to i32 for now.
   %oldval.trunc = trunc i256 %oldval to i32
   ret i32 %oldval.trunc
 }
 
- at addr = global i8* null
+ at addr = global ptr null
 
 define void @test_blockaddress() {
 ; CHECK-LABEL: name: test_blockaddress
 ; CHECK: [[BADDR:%[0-9]+]]:_(p0) = G_BLOCK_ADDR blockaddress(@test_blockaddress, %ir-block.block)
 ; CHECK: G_STORE [[BADDR]](p0)
-  store i8* blockaddress(@test_blockaddress, %block), i8** @addr
-  indirectbr i8* blockaddress(@test_blockaddress, %block), [label %block]
+  store ptr blockaddress(@test_blockaddress, %block), ptr @addr
+  indirectbr ptr blockaddress(@test_blockaddress, %block), [label %block]
 block:
   ret void
 }
 
 %t = type { i32 }
-declare {}* @llvm.invariant.start.p0i8(i64, i8* nocapture) readonly nounwind
-declare void @llvm.invariant.end.p0i8({}*, i64, i8* nocapture) nounwind
+declare ptr @llvm.invariant.start.p0(i64, ptr nocapture) readonly nounwind
+declare void @llvm.invariant.end.p0(ptr, i64, ptr nocapture) nounwind
 define void @test_invariant_intrin() {
 ; CHECK-LABEL: name: test_invariant_intrin
 ; CHECK: %{{[0-9]+}}:_(s64) = G_IMPLICIT_DEF
 ; CHECK-NEXT: RET_ReallyLR
   %x = alloca %t
-  %y = bitcast %t* %x to i8*
-  %inv = call {}* @llvm.invariant.start.p0i8(i64 8, i8* %y)
-  call void @llvm.invariant.end.p0i8({}* %inv, i64 8, i8* %y)
+  %inv = call ptr @llvm.invariant.start.p0(i64 8, ptr %x)
+  call void @llvm.invariant.end.p0(ptr %inv, i64 8, ptr %x)
   ret void
 }
 
@@ -2373,14 +2371,14 @@ define float @test_nearbyint_f32(float %x) {
 
 ; CHECK-LABEL: name: test_llvm.aarch64.neon.ld3.v4i32.p0i32
 ; CHECK: %1:_(<4 x s32>), %2:_(<4 x s32>), %3:_(<4 x s32>) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.aarch64.neon.ld3), %0(p0) :: (load (s384) from %ir.ptr, align 64)
-define void @test_llvm.aarch64.neon.ld3.v4i32.p0i32(i32* %ptr) {
-  %arst = call { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld3.v4i32.p0i32(i32* %ptr)
+define void @test_llvm.aarch64.neon.ld3.v4i32.p0i32(ptr %ptr) {
+  %arst = call { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld3.v4i32.p0(ptr %ptr)
   ret void
 }
 
-declare { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld3.v4i32.p0i32(i32*) #3
+declare { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld3.v4i32.p0(ptr) #3
 
-define void @test_i1_arg_zext(void (i1)* %f) {
+define void @test_i1_arg_zext(ptr %f) {
 ; CHECK-LABEL: name: test_i1_arg_zext
 ; CHECK: [[I1:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
 ; CHECK: [[ZEXT0:%[0-9]+]]:_(s8) = G_ZEXT [[I1]](s1)
@@ -2390,15 +2388,15 @@ define void @test_i1_arg_zext(void (i1)* %f) {
   ret void
 }
 
-declare i8* @llvm.stacksave()
-declare void @llvm.stackrestore(i8*)
+declare ptr @llvm.stacksave()
+declare void @llvm.stackrestore(ptr)
 define void @test_stacksaverestore() {
   ; CHECK-LABEL: name: test_stacksaverestore
   ; CHECK: [[SAVE:%[0-9]+]]:_(p0) = COPY $sp
   ; CHECK-NEXT: $sp = COPY [[SAVE]](p0)
   ; CHECK-NEXT: RET_ReallyLR
-  %sp = call i8* @llvm.stacksave()
-  call void @llvm.stackrestore(i8* %sp)
+  %sp = call ptr @llvm.stacksave()
+  call void @llvm.stackrestore(ptr %sp)
   ret void
 }
 
@@ -2420,7 +2418,7 @@ define void @test_assume(i1 %x) {
 }
 
 declare void @llvm.experimental.noalias.scope.decl(metadata)
-define void @test.llvm.noalias.scope.decl(i8* %P, i8* %Q) nounwind ssp {
+define void @test.llvm.noalias.scope.decl(ptr %P, ptr %Q) nounwind ssp {
   tail call void @llvm.experimental.noalias.scope.decl(metadata !3)
   ; CHECK-LABEL: name: test.llvm.noalias.scope.decl
   ; CHECK-NOT: llvm.experimental.noalias.scope.decl
@@ -2442,12 +2440,12 @@ define void @test_sideeffect() {
   ret void
 }
 
-declare void @llvm.var.annotation(i8*, i8*, i8*, i32, i8*)
-define void @test_var_annotation(i8*, i8*, i8*, i32) {
+declare void @llvm.var.annotation(ptr, ptr, ptr, i32, ptr)
+define void @test_var_annotation(ptr, ptr, ptr, i32) {
   ; CHECK-LABEL: name:            test_var_annotation
   ; CHECK-NOT: llvm.var.annotation
   ; CHECK: RET_ReallyLR
-  call void @llvm.var.annotation(i8* %0, i8* %1, i8* %2, i32 %3, i8* null)
+  call void @llvm.var.annotation(ptr %0, ptr %1, ptr %2, i32 %3, ptr null)
   ret void
 }
 
@@ -2471,7 +2469,7 @@ define i64 @test_freeze(i64 %a) {
   ret i64 %res
 }
 
-define {i8, i32} @test_freeze_struct({ i8, i32 }* %addr) {
+define {i8, i32} @test_freeze_struct(ptr %addr) {
   ; CHECK-LABEL: name:            test_freeze_struct
   ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
   ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[COPY]](p0)
@@ -2484,7 +2482,7 @@ define {i8, i32} @test_freeze_struct({ i8, i32 }* %addr) {
   ; CHECK-NEXT: $w0 = COPY [[ANYEXT]]
   ; CHECK-NEXT: $w1 = COPY [[FREEZE1]]
   ; CHECK-NEXT: RET_ReallyLR implicit $w0, implicit $w1
-  %load = load { i8, i32 }, { i8, i32 }* %addr
+  %load = load { i8, i32 }, ptr %addr
   %res = freeze {i8, i32} %load
   ret {i8, i32} %res
 }

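The changes in this file are mechanical: every typed pointer (i8*, i256*, function pointers) collapses to the single opaque ptr type, pointer-to-pointer bitcasts become no-ops and are dropped, and overloaded intrinsics lose the pointee component of their mangling suffix (@llvm.invariant.start.p0i8 becomes @llvm.invariant.start.p0). A minimal sketch of the pattern, using a hypothetical @example rather than a function from the test:

  ; Typed-pointer form: the accessed type is spelled on the pointer operand.
  define i32 @example(i32* %p) {
    %v = atomicrmw add i32* %p, i32 1 seq_cst
    ret i32 %v
  }

  ; Opaque-pointer form: the accessed type lives only on the instruction.
  define i32 @example(ptr %p) {
    %v = atomicrmw add ptr %p, i32 1 seq_cst
    ret i32 %v
  }

The MIR CHECK lines are untouched because GlobalISel already models pointers as the typeless p0; only the IR side of the test changes.
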
diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-pcsections.ll b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-pcsections.ll
index 7f10e2ee3e6b..07d0f0616e19 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-pcsections.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-pcsections.ll
@@ -2,7 +2,7 @@
 ; RUN: llc < %s -mtriple=aarch64-unknown-linux-gnu -global-isel -global-isel-abort=1 -stop-after=aarch64-expand-pseudo -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,CHECK-NOLSE
 ; RUN: llc < %s -mtriple=aarch64-unknown-linux-gnu -global-isel -global-isel-abort=1 -stop-after=aarch64-expand-pseudo -mattr=+rcpc -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,CHECK-LDAPR
 
-define i32 @val_compare_and_swap(i32* %p, i32 %cmp, i32 %new) {
+define i32 @val_compare_and_swap(ptr %p, i32 %cmp, i32 %new) {
   ; CHECK-LABEL: name: val_compare_and_swap
   ; CHECK: bb.0 (%ir-block.0):
   ; CHECK-NEXT:   successors: %bb.1(0x80000000)
@@ -36,12 +36,12 @@ define i32 @val_compare_and_swap(i32* %p, i32 %cmp, i32 %new) {
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   $w0 = ORRWrs $wzr, $w8, 0, implicit killed $x8
   ; CHECK-NEXT:   RET undef $lr, implicit $w0
-  %pair = cmpxchg i32* %p, i32 %cmp, i32 %new acquire acquire, !pcsections !0
+  %pair = cmpxchg ptr %p, i32 %cmp, i32 %new acquire acquire, !pcsections !0
   %val = extractvalue { i32, i1 } %pair, 0
   ret i32 %val
 }
 
-define i32 @val_compare_and_swap_from_load(i32* %p, i32 %cmp, i32* %pnew) {
+define i32 @val_compare_and_swap_from_load(ptr %p, i32 %cmp, ptr %pnew) {
   ; CHECK-LABEL: name: val_compare_and_swap_from_load
   ; CHECK: bb.0 (%ir-block.0):
   ; CHECK-NEXT:   successors: %bb.1(0x80000000)
@@ -76,13 +76,13 @@ define i32 @val_compare_and_swap_from_load(i32* %p, i32 %cmp, i32* %pnew) {
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   $w0 = ORRWrs $wzr, $w8, 0, implicit killed $x8
   ; CHECK-NEXT:   RET undef $lr, implicit $w0
-  %new = load i32, i32* %pnew, !pcsections !0
-  %pair = cmpxchg i32* %p, i32 %cmp, i32 %new acquire acquire, !pcsections !0
+  %new = load i32, ptr %pnew, !pcsections !0
+  %pair = cmpxchg ptr %p, i32 %cmp, i32 %new acquire acquire, !pcsections !0
   %val = extractvalue { i32, i1 } %pair, 0
   ret i32 %val
 }
 
-define i32 @val_compare_and_swap_rel(i32* %p, i32 %cmp, i32 %new) {
+define i32 @val_compare_and_swap_rel(ptr %p, i32 %cmp, i32 %new) {
   ; CHECK-LABEL: name: val_compare_and_swap_rel
   ; CHECK: bb.0 (%ir-block.0):
   ; CHECK-NEXT:   successors: %bb.1(0x80000000)
@@ -116,12 +116,12 @@ define i32 @val_compare_and_swap_rel(i32* %p, i32 %cmp, i32 %new) {
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   $w0 = ORRWrs $wzr, $w8, 0, implicit killed $x8
   ; CHECK-NEXT:   RET undef $lr, implicit $w0
-  %pair = cmpxchg i32* %p, i32 %cmp, i32 %new acq_rel monotonic, !pcsections !0
+  %pair = cmpxchg ptr %p, i32 %cmp, i32 %new acq_rel monotonic, !pcsections !0
   %val = extractvalue { i32, i1 } %pair, 0
   ret i32 %val
 }
 
-define i64 @val_compare_and_swap_64(i64* %p, i64 %cmp, i64 %new) {
+define i64 @val_compare_and_swap_64(ptr %p, i64 %cmp, i64 %new) {
   ; CHECK-LABEL: name: val_compare_and_swap_64
   ; CHECK: bb.0 (%ir-block.0):
   ; CHECK-NEXT:   successors: %bb.1(0x80000000)
@@ -155,12 +155,12 @@ define i64 @val_compare_and_swap_64(i64* %p, i64 %cmp, i64 %new) {
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   $x0 = ORRXrs $xzr, killed $x8, 0
   ; CHECK-NEXT:   RET undef $lr, implicit $x0
-  %pair = cmpxchg i64* %p, i64 %cmp, i64 %new monotonic monotonic, !pcsections !0
+  %pair = cmpxchg ptr %p, i64 %cmp, i64 %new monotonic monotonic, !pcsections !0
   %val = extractvalue { i64, i1 } %pair, 0
   ret i64 %val
 }
 
-define i64 @val_compare_and_swap_64_monotonic_seqcst(i64* %p, i64 %cmp, i64 %new) {
+define i64 @val_compare_and_swap_64_monotonic_seqcst(ptr %p, i64 %cmp, i64 %new) {
   ; CHECK-LABEL: name: val_compare_and_swap_64_monotonic_seqcst
   ; CHECK: bb.0 (%ir-block.0):
   ; CHECK-NEXT:   successors: %bb.1(0x80000000)
@@ -194,12 +194,12 @@ define i64 @val_compare_and_swap_64_monotonic_seqcst(i64* %p, i64 %cmp, i64 %new
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   $x0 = ORRXrs $xzr, killed $x8, 0
   ; CHECK-NEXT:   RET undef $lr, implicit $x0
-  %pair = cmpxchg i64* %p, i64 %cmp, i64 %new monotonic seq_cst, !pcsections !0
+  %pair = cmpxchg ptr %p, i64 %cmp, i64 %new monotonic seq_cst, !pcsections !0
   %val = extractvalue { i64, i1 } %pair, 0
   ret i64 %val
 }
 
-define i64 @val_compare_and_swap_64_release_acquire(i64* %p, i64 %cmp, i64 %new) {
+define i64 @val_compare_and_swap_64_release_acquire(ptr %p, i64 %cmp, i64 %new) {
   ; CHECK-LABEL: name: val_compare_and_swap_64_release_acquire
   ; CHECK: bb.0 (%ir-block.0):
   ; CHECK-NEXT:   successors: %bb.1(0x80000000)
@@ -233,12 +233,12 @@ define i64 @val_compare_and_swap_64_release_acquire(i64* %p, i64 %cmp, i64 %new)
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   $x0 = ORRXrs $xzr, killed $x8, 0
   ; CHECK-NEXT:   RET undef $lr, implicit $x0
-  %pair = cmpxchg i64* %p, i64 %cmp, i64 %new release acquire, !pcsections !0
+  %pair = cmpxchg ptr %p, i64 %cmp, i64 %new release acquire, !pcsections !0
   %val = extractvalue { i64, i1 } %pair, 0
   ret i64 %val
 }
 
-define i32 @fetch_and_nand(i32* %p) {
+define i32 @fetch_and_nand(ptr %p) {
   ; CHECK-LABEL: name: fetch_and_nand
   ; CHECK: bb.0 (%ir-block.0):
   ; CHECK-NEXT:   successors: %bb.1(0x80000000)
@@ -260,11 +260,11 @@ define i32 @fetch_and_nand(i32* %p) {
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   $w0 = ORRWrs $wzr, $w8, 0, implicit killed $x8
   ; CHECK-NEXT:   RET undef $lr, implicit $w0
-  %val = atomicrmw nand i32* %p, i32 7 release, !pcsections !0
+  %val = atomicrmw nand ptr %p, i32 7 release, !pcsections !0
   ret i32 %val
 }
 
-define i64 @fetch_and_nand_64(i64* %p) {
+define i64 @fetch_and_nand_64(ptr %p) {
   ; CHECK-LABEL: name: fetch_and_nand_64
   ; CHECK: bb.0 (%ir-block.0):
   ; CHECK-NEXT:   successors: %bb.1(0x80000000)
@@ -286,11 +286,11 @@ define i64 @fetch_and_nand_64(i64* %p) {
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   $x0 = ORRXrs $xzr, killed $x8, 0
   ; CHECK-NEXT:   RET undef $lr, implicit $x0
-  %val = atomicrmw nand i64* %p, i64 7 acq_rel, !pcsections !0
+  %val = atomicrmw nand ptr %p, i64 7 acq_rel, !pcsections !0
   ret i64 %val
 }
 
-define i32 @fetch_and_or(i32* %p) {
+define i32 @fetch_and_or(ptr %p) {
   ; CHECK-LABEL: name: fetch_and_or
   ; CHECK: bb.0 (%ir-block.0):
   ; CHECK-NEXT:   successors: %bb.1(0x80000000)
@@ -312,11 +312,11 @@ define i32 @fetch_and_or(i32* %p) {
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   $w0 = ORRWrs $wzr, $w8, 0, implicit killed $x8
   ; CHECK-NEXT:   RET undef $lr, implicit $w0
-  %val = atomicrmw or i32* %p, i32 5 seq_cst, !pcsections !0
+  %val = atomicrmw or ptr %p, i32 5 seq_cst, !pcsections !0
   ret i32 %val
 }
 
-define i64 @fetch_and_or_64(i64* %p) {
+define i64 @fetch_and_or_64(ptr %p) {
   ; CHECK-LABEL: name: fetch_and_or_64
   ; CHECK: bb.0 (%ir-block.0):
   ; CHECK-NEXT:   successors: %bb.1(0x80000000)
@@ -337,7 +337,7 @@ define i64 @fetch_and_or_64(i64* %p) {
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   $x0 = ORRXrs $xzr, killed $x8, 0
   ; CHECK-NEXT:   RET undef $lr, implicit $x0
-  %val = atomicrmw or i64* %p, i64 7 monotonic, !pcsections !0
+  %val = atomicrmw or ptr %p, i64 7 monotonic, !pcsections !0
   ret i64 %val
 }
 
@@ -368,18 +368,18 @@ define void @seq_cst_fence() {
    ret void
 }
 
-define i32 @atomic_load(i32* %p) {
+define i32 @atomic_load(ptr %p) {
   ; CHECK-LABEL: name: atomic_load
   ; CHECK: bb.0 (%ir-block.0):
   ; CHECK-NEXT:   liveins: $x0
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   renamable $w0 = LDARW killed renamable $x0, pcsections !0 :: (load seq_cst (s32) from %ir.p)
   ; CHECK-NEXT:   RET undef $lr, implicit $w0
-   %r = load atomic i32, i32* %p seq_cst, align 4, !pcsections !0
+   %r = load atomic i32, ptr %p seq_cst, align 4, !pcsections !0
    ret i32 %r
 }
 
-define i8 @atomic_load_relaxed_8(i8* %p, i32 %off32) {
+define i8 @atomic_load_relaxed_8(ptr %p, i32 %off32) {
   ; CHECK-LABEL: name: atomic_load_relaxed_8
   ; CHECK: bb.0 (%ir-block.0):
   ; CHECK-NEXT:   liveins: $w1, $x0
@@ -393,25 +393,25 @@ define i8 @atomic_load_relaxed_8(i8* %p, i32 %off32) {
   ; CHECK-NEXT:   $w9 = ADDWrs killed renamable $w10, killed renamable $w9, 0
   ; CHECK-NEXT:   $w0 = ADDWrs killed renamable $w9, killed renamable $w8, 0, pcsections !0
   ; CHECK-NEXT:   RET undef $lr, implicit $w0
-  %ptr_unsigned = getelementptr i8, i8* %p, i32 4095
-  %val_unsigned = load atomic i8, i8* %ptr_unsigned monotonic, align 1, !pcsections !0
+  %ptr_unsigned = getelementptr i8, ptr %p, i32 4095
+  %val_unsigned = load atomic i8, ptr %ptr_unsigned monotonic, align 1, !pcsections !0
 
-  %ptr_regoff = getelementptr i8, i8* %p, i32 %off32
-  %val_regoff = load atomic i8, i8* %ptr_regoff unordered, align 1, !pcsections !0
+  %ptr_regoff = getelementptr i8, ptr %p, i32 %off32
+  %val_regoff = load atomic i8, ptr %ptr_regoff unordered, align 1, !pcsections !0
   %tot1 = add i8 %val_unsigned, %val_regoff, !pcsections !0
 
-  %ptr_unscaled = getelementptr i8, i8* %p, i32 -256
-  %val_unscaled = load atomic i8, i8* %ptr_unscaled monotonic, align 1, !pcsections !0
+  %ptr_unscaled = getelementptr i8, ptr %p, i32 -256
+  %val_unscaled = load atomic i8, ptr %ptr_unscaled monotonic, align 1, !pcsections !0
   %tot2 = add i8 %tot1, %val_unscaled, !pcsections !0
 
-  %ptr_random = getelementptr i8, i8* %p, i32 1191936 ; 0x123000 (i.e. ADD imm)
-  %val_random = load atomic i8, i8* %ptr_random unordered, align 1, !pcsections !0
+  %ptr_random = getelementptr i8, ptr %p, i32 1191936 ; 0x123000 (i.e. ADD imm)
+  %val_random = load atomic i8, ptr %ptr_random unordered, align 1, !pcsections !0
   %tot3 = add i8 %tot2, %val_random, !pcsections !0
 
   ret i8 %tot3
 }
 
-define i16 @atomic_load_relaxed_16(i16* %p, i32 %off32) {
+define i16 @atomic_load_relaxed_16(ptr %p, i32 %off32) {
   ; CHECK-LABEL: name: atomic_load_relaxed_16
   ; CHECK: bb.0 (%ir-block.0):
   ; CHECK-NEXT:   liveins: $w1, $x0
@@ -425,25 +425,25 @@ define i16 @atomic_load_relaxed_16(i16* %p, i32 %off32) {
   ; CHECK-NEXT:   $w9 = ADDWrs killed renamable $w10, killed renamable $w9, 0
   ; CHECK-NEXT:   $w0 = ADDWrs killed renamable $w9, killed renamable $w8, 0, pcsections !0
   ; CHECK-NEXT:   RET undef $lr, implicit $w0
-  %ptr_unsigned = getelementptr i16, i16* %p, i32 4095
-  %val_unsigned = load atomic i16, i16* %ptr_unsigned monotonic, align 2, !pcsections !0
+  %ptr_unsigned = getelementptr i16, ptr %p, i32 4095
+  %val_unsigned = load atomic i16, ptr %ptr_unsigned monotonic, align 2, !pcsections !0
 
-  %ptr_regoff = getelementptr i16, i16* %p, i32 %off32
-  %val_regoff = load atomic i16, i16* %ptr_regoff unordered, align 2, !pcsections !0
+  %ptr_regoff = getelementptr i16, ptr %p, i32 %off32
+  %val_regoff = load atomic i16, ptr %ptr_regoff unordered, align 2, !pcsections !0
   %tot1 = add i16 %val_unsigned, %val_regoff, !pcsections !0
 
-  %ptr_unscaled = getelementptr i16, i16* %p, i32 -128
-  %val_unscaled = load atomic i16, i16* %ptr_unscaled monotonic, align 2, !pcsections !0
+  %ptr_unscaled = getelementptr i16, ptr %p, i32 -128
+  %val_unscaled = load atomic i16, ptr %ptr_unscaled monotonic, align 2, !pcsections !0
   %tot2 = add i16 %tot1, %val_unscaled, !pcsections !0
 
-  %ptr_random = getelementptr i16, i16* %p, i32 595968 ; 0x123000/2 (i.e. ADD imm)
-  %val_random = load atomic i16, i16* %ptr_random unordered, align 2, !pcsections !0
+  %ptr_random = getelementptr i16, ptr %p, i32 595968 ; 0x123000/2 (i.e. ADD imm)
+  %val_random = load atomic i16, ptr %ptr_random unordered, align 2, !pcsections !0
   %tot3 = add i16 %tot2, %val_random, !pcsections !0
 
   ret i16 %tot3
 }
 
-define i32 @atomic_load_relaxed_32(i32* %p, i32 %off32) {
+define i32 @atomic_load_relaxed_32(ptr %p, i32 %off32) {
   ; CHECK-LABEL: name: atomic_load_relaxed_32
   ; CHECK: bb.0 (%ir-block.0):
   ; CHECK-NEXT:   liveins: $w1, $x0
@@ -457,25 +457,25 @@ define i32 @atomic_load_relaxed_32(i32* %p, i32 %off32) {
   ; CHECK-NEXT:   $w9 = ADDWrs killed renamable $w10, killed renamable $w9, 0
   ; CHECK-NEXT:   $w0 = ADDWrs killed renamable $w9, killed renamable $w8, 0, pcsections !0
   ; CHECK-NEXT:   RET undef $lr, implicit $w0
-  %ptr_unsigned = getelementptr i32, i32* %p, i32 4095
-  %val_unsigned = load atomic i32, i32* %ptr_unsigned monotonic, align 4, !pcsections !0
+  %ptr_unsigned = getelementptr i32, ptr %p, i32 4095
+  %val_unsigned = load atomic i32, ptr %ptr_unsigned monotonic, align 4, !pcsections !0
 
-  %ptr_regoff = getelementptr i32, i32* %p, i32 %off32
-  %val_regoff = load atomic i32, i32* %ptr_regoff unordered, align 4, !pcsections !0
+  %ptr_regoff = getelementptr i32, ptr %p, i32 %off32
+  %val_regoff = load atomic i32, ptr %ptr_regoff unordered, align 4, !pcsections !0
   %tot1 = add i32 %val_unsigned, %val_regoff, !pcsections !0
 
-  %ptr_unscaled = getelementptr i32, i32* %p, i32 -64
-  %val_unscaled = load atomic i32, i32* %ptr_unscaled monotonic, align 4, !pcsections !0
+  %ptr_unscaled = getelementptr i32, ptr %p, i32 -64
+  %val_unscaled = load atomic i32, ptr %ptr_unscaled monotonic, align 4, !pcsections !0
   %tot2 = add i32 %tot1, %val_unscaled, !pcsections !0
 
-  %ptr_random = getelementptr i32, i32* %p, i32 297984 ; 0x123000/4 (i.e. ADD imm)
-  %val_random = load atomic i32, i32* %ptr_random unordered, align 4, !pcsections !0
+  %ptr_random = getelementptr i32, ptr %p, i32 297984 ; 0x123000/4 (i.e. ADD imm)
+  %val_random = load atomic i32, ptr %ptr_random unordered, align 4, !pcsections !0
   %tot3 = add i32 %tot2, %val_random, !pcsections !0
 
   ret i32 %tot3
 }
 
-define i64 @atomic_load_relaxed_64(i64* %p, i32 %off32) {
+define i64 @atomic_load_relaxed_64(ptr %p, i32 %off32) {
   ; CHECK-LABEL: name: atomic_load_relaxed_64
   ; CHECK: bb.0 (%ir-block.0):
   ; CHECK-NEXT:   liveins: $w1, $x0
@@ -489,26 +489,26 @@ define i64 @atomic_load_relaxed_64(i64* %p, i32 %off32) {
   ; CHECK-NEXT:   $x9 = ADDXrs killed renamable $x10, killed renamable $x9, 0
   ; CHECK-NEXT:   $x0 = ADDXrs killed renamable $x9, killed renamable $x8, 0, pcsections !0
   ; CHECK-NEXT:   RET undef $lr, implicit $x0
-  %ptr_unsigned = getelementptr i64, i64* %p, i32 4095
-  %val_unsigned = load atomic i64, i64* %ptr_unsigned monotonic, align 8, !pcsections !0
+  %ptr_unsigned = getelementptr i64, ptr %p, i32 4095
+  %val_unsigned = load atomic i64, ptr %ptr_unsigned monotonic, align 8, !pcsections !0
 
-  %ptr_regoff = getelementptr i64, i64* %p, i32 %off32
-  %val_regoff = load atomic i64, i64* %ptr_regoff unordered, align 8, !pcsections !0
+  %ptr_regoff = getelementptr i64, ptr %p, i32 %off32
+  %val_regoff = load atomic i64, ptr %ptr_regoff unordered, align 8, !pcsections !0
   %tot1 = add i64 %val_unsigned, %val_regoff, !pcsections !0
 
-  %ptr_unscaled = getelementptr i64, i64* %p, i32 -32
-  %val_unscaled = load atomic i64, i64* %ptr_unscaled monotonic, align 8, !pcsections !0
+  %ptr_unscaled = getelementptr i64, ptr %p, i32 -32
+  %val_unscaled = load atomic i64, ptr %ptr_unscaled monotonic, align 8, !pcsections !0
   %tot2 = add i64 %tot1, %val_unscaled, !pcsections !0
 
-  %ptr_random = getelementptr i64, i64* %p, i32 148992 ; 0x123000/8 (i.e. ADD imm)
-  %val_random = load atomic i64, i64* %ptr_random unordered, align 8, !pcsections !0
+  %ptr_random = getelementptr i64, ptr %p, i32 148992 ; 0x123000/8 (i.e. ADD imm)
+  %val_random = load atomic i64, ptr %ptr_random unordered, align 8, !pcsections !0
   %tot3 = add i64 %tot2, %val_random, !pcsections !0
 
   ret i64 %tot3
 }
 
 
-define void @atomc_store(i32* %p) {
+define void @atomc_store(ptr %p) {
   ; CHECK-LABEL: name: atomc_store
   ; CHECK: bb.0 (%ir-block.0):
   ; CHECK-NEXT:   liveins: $x0
@@ -516,11 +516,11 @@ define void @atomc_store(i32* %p) {
   ; CHECK-NEXT:   renamable $w8 = MOVZWi 4, 0
   ; CHECK-NEXT:   STLRW killed renamable $w8, killed renamable $x0, pcsections !0 :: (store seq_cst (s32) into %ir.p)
   ; CHECK-NEXT:   RET undef $lr
-   store atomic i32 4, i32* %p seq_cst, align 4, !pcsections !0
+   store atomic i32 4, ptr %p seq_cst, align 4, !pcsections !0
    ret void
 }
 
-define void @atomic_store_relaxed_8(i8* %p, i32 %off32, i8 %val) {
+define void @atomic_store_relaxed_8(ptr %p, i32 %off32, i8 %val) {
   ; CHECK-LABEL: name: atomic_store_relaxed_8
   ; CHECK: bb.0 (%ir-block.0):
   ; CHECK-NEXT:   liveins: $w1, $w2, $x0
@@ -531,22 +531,22 @@ define void @atomic_store_relaxed_8(i8* %p, i32 %off32, i8 %val) {
   ; CHECK-NEXT:   STURBBi renamable $w2, killed renamable $x0, -256, pcsections !0 :: (store monotonic (s8) into %ir.ptr_unscaled)
   ; CHECK-NEXT:   STRBBui killed renamable $w2, killed renamable $x8, 0, pcsections !0 :: (store unordered (s8) into %ir.ptr_random)
   ; CHECK-NEXT:   RET undef $lr
-  %ptr_unsigned = getelementptr i8, i8* %p, i32 4095
-  store atomic i8 %val, i8* %ptr_unsigned monotonic, align 1, !pcsections !0
+  %ptr_unsigned = getelementptr i8, ptr %p, i32 4095
+  store atomic i8 %val, ptr %ptr_unsigned monotonic, align 1, !pcsections !0
 
-  %ptr_regoff = getelementptr i8, i8* %p, i32 %off32
-  store atomic i8 %val, i8* %ptr_regoff unordered, align 1, !pcsections !0
+  %ptr_regoff = getelementptr i8, ptr %p, i32 %off32
+  store atomic i8 %val, ptr %ptr_regoff unordered, align 1, !pcsections !0
 
-  %ptr_unscaled = getelementptr i8, i8* %p, i32 -256
-  store atomic i8 %val, i8* %ptr_unscaled monotonic, align 1, !pcsections !0
+  %ptr_unscaled = getelementptr i8, ptr %p, i32 -256
+  store atomic i8 %val, ptr %ptr_unscaled monotonic, align 1, !pcsections !0
 
-  %ptr_random = getelementptr i8, i8* %p, i32 1191936 ; 0x123000 (i.e. ADD imm)
-  store atomic i8 %val, i8* %ptr_random unordered, align 1, !pcsections !0
+  %ptr_random = getelementptr i8, ptr %p, i32 1191936 ; 0x123000 (i.e. ADD imm)
+  store atomic i8 %val, ptr %ptr_random unordered, align 1, !pcsections !0
 
   ret void
 }
 
-define void @atomic_store_relaxed_16(i16* %p, i32 %off32, i16 %val) {
+define void @atomic_store_relaxed_16(ptr %p, i32 %off32, i16 %val) {
   ; CHECK-LABEL: name: atomic_store_relaxed_16
   ; CHECK: bb.0 (%ir-block.0):
   ; CHECK-NEXT:   liveins: $w1, $w2, $x0
@@ -557,22 +557,22 @@ define void @atomic_store_relaxed_16(i16* %p, i32 %off32, i16 %val) {
   ; CHECK-NEXT:   STURHHi renamable $w2, killed renamable $x0, -256, pcsections !0 :: (store monotonic (s16) into %ir.ptr_unscaled)
   ; CHECK-NEXT:   STRHHui killed renamable $w2, killed renamable $x8, 0, pcsections !0 :: (store unordered (s16) into %ir.ptr_random)
   ; CHECK-NEXT:   RET undef $lr
-  %ptr_unsigned = getelementptr i16, i16* %p, i32 4095
-  store atomic i16 %val, i16* %ptr_unsigned monotonic, align 2, !pcsections !0
+  %ptr_unsigned = getelementptr i16, ptr %p, i32 4095
+  store atomic i16 %val, ptr %ptr_unsigned monotonic, align 2, !pcsections !0
 
-  %ptr_regoff = getelementptr i16, i16* %p, i32 %off32
-  store atomic i16 %val, i16* %ptr_regoff unordered, align 2, !pcsections !0
+  %ptr_regoff = getelementptr i16, ptr %p, i32 %off32
+  store atomic i16 %val, ptr %ptr_regoff unordered, align 2, !pcsections !0
 
-  %ptr_unscaled = getelementptr i16, i16* %p, i32 -128
-  store atomic i16 %val, i16* %ptr_unscaled monotonic, align 2, !pcsections !0
+  %ptr_unscaled = getelementptr i16, ptr %p, i32 -128
+  store atomic i16 %val, ptr %ptr_unscaled monotonic, align 2, !pcsections !0
 
-  %ptr_random = getelementptr i16, i16* %p, i32 595968 ; 0x123000/2 (i.e. ADD imm)
-  store atomic i16 %val, i16* %ptr_random unordered, align 2, !pcsections !0
+  %ptr_random = getelementptr i16, ptr %p, i32 595968 ; 0x123000/2 (i.e. ADD imm)
+  store atomic i16 %val, ptr %ptr_random unordered, align 2, !pcsections !0
 
   ret void
 }
 
-define void @atomic_store_relaxed_32(i32* %p, i32 %off32, i32 %val) {
+define void @atomic_store_relaxed_32(ptr %p, i32 %off32, i32 %val) {
   ; CHECK-LABEL: name: atomic_store_relaxed_32
   ; CHECK: bb.0 (%ir-block.0):
   ; CHECK-NEXT:   liveins: $w1, $w2, $x0
@@ -583,22 +583,22 @@ define void @atomic_store_relaxed_32(i32* %p, i32 %off32, i32 %val) {
   ; CHECK-NEXT:   STURWi renamable $w2, killed renamable $x0, -256, pcsections !0 :: (store monotonic (s32) into %ir.ptr_unscaled)
   ; CHECK-NEXT:   STRWui killed renamable $w2, killed renamable $x8, 0, pcsections !0 :: (store unordered (s32) into %ir.ptr_random)
   ; CHECK-NEXT:   RET undef $lr
-  %ptr_unsigned = getelementptr i32, i32* %p, i32 4095
-  store atomic i32 %val, i32* %ptr_unsigned monotonic, align 4, !pcsections !0
+  %ptr_unsigned = getelementptr i32, ptr %p, i32 4095
+  store atomic i32 %val, ptr %ptr_unsigned monotonic, align 4, !pcsections !0
 
-  %ptr_regoff = getelementptr i32, i32* %p, i32 %off32
-  store atomic i32 %val, i32* %ptr_regoff unordered, align 4, !pcsections !0
+  %ptr_regoff = getelementptr i32, ptr %p, i32 %off32
+  store atomic i32 %val, ptr %ptr_regoff unordered, align 4, !pcsections !0
 
-  %ptr_unscaled = getelementptr i32, i32* %p, i32 -64
-  store atomic i32 %val, i32* %ptr_unscaled monotonic, align 4, !pcsections !0
+  %ptr_unscaled = getelementptr i32, ptr %p, i32 -64
+  store atomic i32 %val, ptr %ptr_unscaled monotonic, align 4, !pcsections !0
 
-  %ptr_random = getelementptr i32, i32* %p, i32 297984 ; 0x123000/4 (i.e. ADD imm)
-  store atomic i32 %val, i32* %ptr_random unordered, align 4, !pcsections !0
+  %ptr_random = getelementptr i32, ptr %p, i32 297984 ; 0x123000/4 (i.e. ADD imm)
+  store atomic i32 %val, ptr %ptr_random unordered, align 4, !pcsections !0
 
   ret void
 }
 
-define void @atomic_store_relaxed_64(i64* %p, i32 %off32, i64 %val) {
+define void @atomic_store_relaxed_64(ptr %p, i32 %off32, i64 %val) {
   ; CHECK-LABEL: name: atomic_store_relaxed_64
   ; CHECK: bb.0 (%ir-block.0):
   ; CHECK-NEXT:   liveins: $w1, $x0, $x2
@@ -609,22 +609,22 @@ define void @atomic_store_relaxed_64(i64* %p, i32 %off32, i64 %val) {
   ; CHECK-NEXT:   STURXi renamable $x2, killed renamable $x0, -256, pcsections !0 :: (store monotonic (s64) into %ir.ptr_unscaled)
   ; CHECK-NEXT:   STRXui killed renamable $x2, killed renamable $x8, 0, pcsections !0 :: (store unordered (s64) into %ir.ptr_random)
   ; CHECK-NEXT:   RET undef $lr
-  %ptr_unsigned = getelementptr i64, i64* %p, i32 4095
-  store atomic i64 %val, i64* %ptr_unsigned monotonic, align 8, !pcsections !0
+  %ptr_unsigned = getelementptr i64, ptr %p, i32 4095
+  store atomic i64 %val, ptr %ptr_unsigned monotonic, align 8, !pcsections !0
 
-  %ptr_regoff = getelementptr i64, i64* %p, i32 %off32
-  store atomic i64 %val, i64* %ptr_regoff unordered, align 8, !pcsections !0
+  %ptr_regoff = getelementptr i64, ptr %p, i32 %off32
+  store atomic i64 %val, ptr %ptr_regoff unordered, align 8, !pcsections !0
 
-  %ptr_unscaled = getelementptr i64, i64* %p, i32 -32
-  store atomic i64 %val, i64* %ptr_unscaled monotonic, align 8, !pcsections !0
+  %ptr_unscaled = getelementptr i64, ptr %p, i32 -32
+  store atomic i64 %val, ptr %ptr_unscaled monotonic, align 8, !pcsections !0
 
-  %ptr_random = getelementptr i64, i64* %p, i32 148992 ; 0x123000/8 (i.e. ADD imm)
-  store atomic i64 %val, i64* %ptr_random unordered, align 8, !pcsections !0
+  %ptr_random = getelementptr i64, ptr %p, i32 148992 ; 0x123000/8 (i.e. ADD imm)
+  store atomic i64 %val, ptr %ptr_random unordered, align 8, !pcsections !0
 
   ret void
 }
 
-define i32 @load_zext(i8* %p8, i16* %p16) {
+define i32 @load_zext(ptr %p8, ptr %p16) {
   ; CHECK-NOLSE-LABEL: name: load_zext
   ; CHECK-NOLSE: bb.0 (%ir-block.0):
   ; CHECK-NOLSE-NEXT:   liveins: $x0, $x1
@@ -641,17 +641,17 @@ define i32 @load_zext(i8* %p8, i16* %p16) {
   ; CHECK-LDAPR-NEXT:   renamable $w9 = LDRHHui killed renamable $x1, 0, pcsections !0 :: (load unordered (s16) from %ir.p16)
   ; CHECK-LDAPR-NEXT:   renamable $w0 = ADDWrx killed renamable $w9, killed renamable $w8, 0, pcsections !0
   ; CHECK-LDAPR-NEXT:   RET undef $lr, implicit $w0
-  %val1.8 = load atomic i8, i8* %p8 acquire, align 1, !pcsections !0
+  %val1.8 = load atomic i8, ptr %p8 acquire, align 1, !pcsections !0
   %val1 = zext i8 %val1.8 to i32
 
-  %val2.16 = load atomic i16, i16* %p16 unordered, align 2, !pcsections !0
+  %val2.16 = load atomic i16, ptr %p16 unordered, align 2, !pcsections !0
   %val2 = zext i16 %val2.16 to i32
 
   %res = add i32 %val1, %val2, !pcsections !0
   ret i32 %res
 }
 
-define { i32, i64 } @load_acq(i32* %p32, i64* %p64) {
+define { i32, i64 } @load_acq(ptr %p32, ptr %p64) {
   ; CHECK-NOLSE-LABEL: name: load_acq
   ; CHECK-NOLSE: bb.0 (%ir-block.0):
   ; CHECK-NOLSE-NEXT:   liveins: $x0, $x1
@@ -666,16 +666,16 @@ define { i32, i64 } @load_acq(i32* %p32, i64* %p64) {
   ; CHECK-LDAPR-NEXT:   renamable $w0 = LDARW killed renamable $x0, pcsections !0 :: (load seq_cst (s32) from %ir.p32)
   ; CHECK-LDAPR-NEXT:   renamable $x1 = LDAPRX killed renamable $x1, pcsections !0 :: (load acquire (s64) from %ir.p64)
   ; CHECK-LDAPR-NEXT:   RET undef $lr, implicit $w0, implicit $x1
-  %val32 = load atomic i32, i32* %p32 seq_cst, align 4, !pcsections !0
+  %val32 = load atomic i32, ptr %p32 seq_cst, align 4, !pcsections !0
   %tmp = insertvalue { i32, i64 } undef, i32 %val32, 0
 
-  %val64 = load atomic i64, i64* %p64 acquire, align 8, !pcsections !0
+  %val64 = load atomic i64, ptr %p64 acquire, align 8, !pcsections !0
   %res = insertvalue { i32, i64 } %tmp, i64 %val64, 1
 
   ret { i32, i64 } %res
 }
 
-define i32 @load_sext(i8* %p8, i16* %p16) {
+define i32 @load_sext(ptr %p8, ptr %p16) {
   ; CHECK-NOLSE-LABEL: name: load_sext
   ; CHECK-NOLSE: bb.0 (%ir-block.0):
   ; CHECK-NOLSE-NEXT:   liveins: $x0, $x1
@@ -694,17 +694,17 @@ define i32 @load_sext(i8* %p8, i16* %p16) {
   ; CHECK-LDAPR-NEXT:   renamable $w9 = SBFMWri killed renamable $w9, 0, 15
   ; CHECK-LDAPR-NEXT:   renamable $w0 = ADDWrx killed renamable $w9, killed renamable $w8, 32, pcsections !0
   ; CHECK-LDAPR-NEXT:   RET undef $lr, implicit $w0
-  %val1.8 = load atomic i8, i8* %p8 acquire, align 1, !pcsections !0
+  %val1.8 = load atomic i8, ptr %p8 acquire, align 1, !pcsections !0
   %val1 = sext i8 %val1.8 to i32
 
-  %val2.16 = load atomic i16, i16* %p16 unordered, align 2, !pcsections !0
+  %val2.16 = load atomic i16, ptr %p16 unordered, align 2, !pcsections !0
   %val2 = sext i16 %val2.16 to i32
 
   %res = add i32 %val1, %val2, !pcsections !0
   ret i32 %res
 }
 
-define void @store_trunc(i32 %val, i8* %p8, i16* %p16) {
+define void @store_trunc(i32 %val, ptr %p8, ptr %p16) {
   ; CHECK-LABEL: name: store_trunc
   ; CHECK: bb.0 (%ir-block.0):
   ; CHECK-NEXT:   liveins: $w0, $x1, $x2
@@ -713,15 +713,15 @@ define void @store_trunc(i32 %val, i8* %p8, i16* %p16) {
   ; CHECK-NEXT:   STRHHui killed renamable $w0, killed renamable $x2, 0, pcsections !0 :: (store monotonic (s16) into %ir.p16)
   ; CHECK-NEXT:   RET undef $lr
   %val8 = trunc i32 %val to i8
-  store atomic i8 %val8, i8* %p8 seq_cst, align 1, !pcsections !0
+  store atomic i8 %val8, ptr %p8 seq_cst, align 1, !pcsections !0
 
   %val16 = trunc i32 %val to i16
-  store atomic i16 %val16, i16* %p16 monotonic, align 2, !pcsections !0
+  store atomic i16 %val16, ptr %p16 monotonic, align 2, !pcsections !0
 
   ret void
 }
 
-define i8 @atomicrmw_add_i8(i8* %ptr, i8 %rhs) {
+define i8 @atomicrmw_add_i8(ptr %ptr, i8 %rhs) {
   ; CHECK-LABEL: name: atomicrmw_add_i8
   ; CHECK: bb.0 (%ir-block.0):
   ; CHECK-NEXT:   successors: %bb.1(0x80000000)
@@ -742,11 +742,11 @@ define i8 @atomicrmw_add_i8(i8* %ptr, i8 %rhs) {
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   $w0 = ORRWrs $wzr, $w8, 0, implicit killed $x8
   ; CHECK-NEXT:   RET undef $lr, implicit $w0
-  %res = atomicrmw add i8* %ptr, i8 %rhs seq_cst, !pcsections !0
+  %res = atomicrmw add ptr %ptr, i8 %rhs seq_cst, !pcsections !0
   ret i8 %res
 }
 
-define i8 @atomicrmw_xchg_i8(i8* %ptr, i8 %rhs) {
+define i8 @atomicrmw_xchg_i8(ptr %ptr, i8 %rhs) {
   ; CHECK-LABEL: name: atomicrmw_xchg_i8
   ; CHECK: bb.0 (%ir-block.0):
   ; CHECK-NEXT:   successors: %bb.1(0x80000000)
@@ -767,11 +767,11 @@ define i8 @atomicrmw_xchg_i8(i8* %ptr, i8 %rhs) {
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   $w0 = ORRWrs $wzr, $w8, 0, implicit killed $x8
   ; CHECK-NEXT:   RET undef $lr, implicit $w0
-  %res = atomicrmw xchg i8* %ptr, i8 %rhs monotonic, !pcsections !0
+  %res = atomicrmw xchg ptr %ptr, i8 %rhs monotonic, !pcsections !0
   ret i8 %res
 }
 
-define i8 @atomicrmw_sub_i8(i8* %ptr, i8 %rhs) {
+define i8 @atomicrmw_sub_i8(ptr %ptr, i8 %rhs) {
   ; CHECK-LABEL: name: atomicrmw_sub_i8
   ; CHECK: bb.0 (%ir-block.0):
   ; CHECK-NEXT:   successors: %bb.1(0x80000000)
@@ -792,11 +792,11 @@ define i8 @atomicrmw_sub_i8(i8* %ptr, i8 %rhs) {
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   $w0 = ORRWrs $wzr, $w8, 0, implicit killed $x8
   ; CHECK-NEXT:   RET undef $lr, implicit $w0
-  %res = atomicrmw sub i8* %ptr, i8 %rhs acquire, !pcsections !0
+  %res = atomicrmw sub ptr %ptr, i8 %rhs acquire, !pcsections !0
   ret i8 %res
 }
 
-define i8 @atomicrmw_and_i8(i8* %ptr, i8 %rhs) {
+define i8 @atomicrmw_and_i8(ptr %ptr, i8 %rhs) {
   ; CHECK-LABEL: name: atomicrmw_and_i8
   ; CHECK: bb.0 (%ir-block.0):
   ; CHECK-NEXT:   successors: %bb.1(0x80000000)
@@ -817,11 +817,11 @@ define i8 @atomicrmw_and_i8(i8* %ptr, i8 %rhs) {
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   $w0 = ORRWrs $wzr, $w8, 0, implicit killed $x8
   ; CHECK-NEXT:   RET undef $lr, implicit $w0
-  %res = atomicrmw and i8* %ptr, i8 %rhs release, !pcsections !0
+  %res = atomicrmw and ptr %ptr, i8 %rhs release, !pcsections !0
   ret i8 %res
 }
 
-define i8 @atomicrmw_or_i8(i8* %ptr, i8 %rhs) {
+define i8 @atomicrmw_or_i8(ptr %ptr, i8 %rhs) {
   ; CHECK-LABEL: name: atomicrmw_or_i8
   ; CHECK: bb.0 (%ir-block.0):
   ; CHECK-NEXT:   successors: %bb.1(0x80000000)
@@ -842,11 +842,11 @@ define i8 @atomicrmw_or_i8(i8* %ptr, i8 %rhs) {
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   $w0 = ORRWrs $wzr, $w8, 0, implicit killed $x8
   ; CHECK-NEXT:   RET undef $lr, implicit $w0
-  %res = atomicrmw or i8* %ptr, i8 %rhs seq_cst, !pcsections !0
+  %res = atomicrmw or ptr %ptr, i8 %rhs seq_cst, !pcsections !0
   ret i8 %res
 }
 
-define i8 @atomicrmw_xor_i8(i8* %ptr, i8 %rhs) {
+define i8 @atomicrmw_xor_i8(ptr %ptr, i8 %rhs) {
   ; CHECK-LABEL: name: atomicrmw_xor_i8
   ; CHECK: bb.0 (%ir-block.0):
   ; CHECK-NEXT:   successors: %bb.1(0x80000000)
@@ -867,11 +867,11 @@ define i8 @atomicrmw_xor_i8(i8* %ptr, i8 %rhs) {
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   $w0 = ORRWrs $wzr, $w8, 0, implicit killed $x8
   ; CHECK-NEXT:   RET undef $lr, implicit $w0
-  %res = atomicrmw xor i8* %ptr, i8 %rhs monotonic, !pcsections !0
+  %res = atomicrmw xor ptr %ptr, i8 %rhs monotonic, !pcsections !0
   ret i8 %res
 }
 
-define i8 @atomicrmw_min_i8(i8* %ptr, i8 %rhs) {
+define i8 @atomicrmw_min_i8(ptr %ptr, i8 %rhs) {
   ; CHECK-LABEL: name: atomicrmw_min_i8
   ; CHECK: bb.0 (%ir-block.0):
   ; CHECK-NEXT:   successors: %bb.1(0x80000000)
@@ -894,11 +894,11 @@ define i8 @atomicrmw_min_i8(i8* %ptr, i8 %rhs) {
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   $w0 = ORRWrs $wzr, $w8, 0, implicit killed $x8
   ; CHECK-NEXT:   RET undef $lr, implicit $w0
-  %res = atomicrmw min i8* %ptr, i8 %rhs acquire, !pcsections !0
+  %res = atomicrmw min ptr %ptr, i8 %rhs acquire, !pcsections !0
   ret i8 %res
 }
 
-define i8 @atomicrmw_max_i8(i8* %ptr, i8 %rhs) {
+define i8 @atomicrmw_max_i8(ptr %ptr, i8 %rhs) {
   ; CHECK-LABEL: name: atomicrmw_max_i8
   ; CHECK: bb.0 (%ir-block.0):
   ; CHECK-NEXT:   successors: %bb.1(0x80000000)
@@ -921,11 +921,11 @@ define i8 @atomicrmw_max_i8(i8* %ptr, i8 %rhs) {
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   $w0 = ORRWrs $wzr, $w8, 0, implicit killed $x8
   ; CHECK-NEXT:   RET undef $lr, implicit $w0
-  %res = atomicrmw max i8* %ptr, i8 %rhs release, !pcsections !0
+  %res = atomicrmw max ptr %ptr, i8 %rhs release, !pcsections !0
   ret i8 %res
 }
 
-define i8 @atomicrmw_umin_i8(i8* %ptr, i8 %rhs) {
+define i8 @atomicrmw_umin_i8(ptr %ptr, i8 %rhs) {
   ; CHECK-LABEL: name: atomicrmw_umin_i8
   ; CHECK: bb.0 (%ir-block.0):
   ; CHECK-NEXT:   successors: %bb.1(0x80000000)
@@ -949,11 +949,11 @@ define i8 @atomicrmw_umin_i8(i8* %ptr, i8 %rhs) {
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   $w0 = ORRWrs $wzr, $w8, 0, implicit killed $x8
   ; CHECK-NEXT:   RET undef $lr, implicit $w0
-  %res = atomicrmw umin i8* %ptr, i8 %rhs seq_cst, !pcsections !0
+  %res = atomicrmw umin ptr %ptr, i8 %rhs seq_cst, !pcsections !0
   ret i8 %res
 }
 
-define i8 @atomicrmw_umax_i8(i8* %ptr, i8 %rhs) {
+define i8 @atomicrmw_umax_i8(ptr %ptr, i8 %rhs) {
   ; CHECK-LABEL: name: atomicrmw_umax_i8
   ; CHECK: bb.0 (%ir-block.0):
   ; CHECK-NEXT:   successors: %bb.1(0x80000000)
@@ -977,11 +977,11 @@ define i8 @atomicrmw_umax_i8(i8* %ptr, i8 %rhs) {
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   $w0 = ORRWrs $wzr, $w8, 0, implicit killed $x8
   ; CHECK-NEXT:   RET undef $lr, implicit $w0
-  %res = atomicrmw umax i8* %ptr, i8 %rhs monotonic, !pcsections !0
+  %res = atomicrmw umax ptr %ptr, i8 %rhs monotonic, !pcsections !0
   ret i8 %res
 }
 
-define i16 @atomicrmw_add_i16(i16* %ptr, i16 %rhs) {
+define i16 @atomicrmw_add_i16(ptr %ptr, i16 %rhs) {
   ; CHECK-LABEL: name: atomicrmw_add_i16
   ; CHECK: bb.0 (%ir-block.0):
   ; CHECK-NEXT:   successors: %bb.1(0x80000000)
@@ -1002,11 +1002,11 @@ define i16 @atomicrmw_add_i16(i16* %ptr, i16 %rhs) {
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   $w0 = ORRWrs $wzr, $w8, 0, implicit killed $x8
   ; CHECK-NEXT:   RET undef $lr, implicit $w0
-  %res = atomicrmw add i16* %ptr, i16 %rhs seq_cst, !pcsections !0
+  %res = atomicrmw add ptr %ptr, i16 %rhs seq_cst, !pcsections !0
   ret i16 %res
 }
 
-define i16 @atomicrmw_xchg_i16(i16* %ptr, i16 %rhs) {
+define i16 @atomicrmw_xchg_i16(ptr %ptr, i16 %rhs) {
   ; CHECK-LABEL: name: atomicrmw_xchg_i16
   ; CHECK: bb.0 (%ir-block.0):
   ; CHECK-NEXT:   successors: %bb.1(0x80000000)
@@ -1027,11 +1027,11 @@ define i16 @atomicrmw_xchg_i16(i16* %ptr, i16 %rhs) {
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   $w0 = ORRWrs $wzr, $w8, 0, implicit killed $x8
   ; CHECK-NEXT:   RET undef $lr, implicit $w0
-  %res = atomicrmw xchg i16* %ptr, i16 %rhs monotonic, !pcsections !0
+  %res = atomicrmw xchg ptr %ptr, i16 %rhs monotonic, !pcsections !0
   ret i16 %res
 }
 
-define i16 @atomicrmw_sub_i16(i16* %ptr, i16 %rhs) {
+define i16 @atomicrmw_sub_i16(ptr %ptr, i16 %rhs) {
   ; CHECK-LABEL: name: atomicrmw_sub_i16
   ; CHECK: bb.0 (%ir-block.0):
   ; CHECK-NEXT:   successors: %bb.1(0x80000000)
@@ -1052,11 +1052,11 @@ define i16 @atomicrmw_sub_i16(i16* %ptr, i16 %rhs) {
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   $w0 = ORRWrs $wzr, $w8, 0, implicit killed $x8
   ; CHECK-NEXT:   RET undef $lr, implicit $w0
-  %res = atomicrmw sub i16* %ptr, i16 %rhs acquire, !pcsections !0
+  %res = atomicrmw sub ptr %ptr, i16 %rhs acquire, !pcsections !0
   ret i16 %res
 }
 
-define i16 @atomicrmw_and_i16(i16* %ptr, i16 %rhs) {
+define i16 @atomicrmw_and_i16(ptr %ptr, i16 %rhs) {
   ; CHECK-LABEL: name: atomicrmw_and_i16
   ; CHECK: bb.0 (%ir-block.0):
   ; CHECK-NEXT:   successors: %bb.1(0x80000000)
@@ -1077,11 +1077,11 @@ define i16 @atomicrmw_and_i16(i16* %ptr, i16 %rhs) {
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   $w0 = ORRWrs $wzr, $w8, 0, implicit killed $x8
   ; CHECK-NEXT:   RET undef $lr, implicit $w0
-  %res = atomicrmw and i16* %ptr, i16 %rhs release, !pcsections !0
+  %res = atomicrmw and ptr %ptr, i16 %rhs release, !pcsections !0
   ret i16 %res
 }
 
-define i16 @atomicrmw_or_i16(i16* %ptr, i16 %rhs) {
+define i16 @atomicrmw_or_i16(ptr %ptr, i16 %rhs) {
   ; CHECK-LABEL: name: atomicrmw_or_i16
   ; CHECK: bb.0 (%ir-block.0):
   ; CHECK-NEXT:   successors: %bb.1(0x80000000)
@@ -1102,11 +1102,11 @@ define i16 @atomicrmw_or_i16(i16* %ptr, i16 %rhs) {
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   $w0 = ORRWrs $wzr, $w8, 0, implicit killed $x8
   ; CHECK-NEXT:   RET undef $lr, implicit $w0
-  %res = atomicrmw or i16* %ptr, i16 %rhs seq_cst, !pcsections !0
+  %res = atomicrmw or ptr %ptr, i16 %rhs seq_cst, !pcsections !0
   ret i16 %res
 }
 
-define i16 @atomicrmw_xor_i16(i16* %ptr, i16 %rhs) {
+define i16 @atomicrmw_xor_i16(ptr %ptr, i16 %rhs) {
   ; CHECK-LABEL: name: atomicrmw_xor_i16
   ; CHECK: bb.0 (%ir-block.0):
   ; CHECK-NEXT:   successors: %bb.1(0x80000000)
@@ -1127,11 +1127,11 @@ define i16 @atomicrmw_xor_i16(i16* %ptr, i16 %rhs) {
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   $w0 = ORRWrs $wzr, $w8, 0, implicit killed $x8
   ; CHECK-NEXT:   RET undef $lr, implicit $w0
-  %res = atomicrmw xor i16* %ptr, i16 %rhs monotonic, !pcsections !0
+  %res = atomicrmw xor ptr %ptr, i16 %rhs monotonic, !pcsections !0
   ret i16 %res
 }
 
-define i16 @atomicrmw_min_i16(i16* %ptr, i16 %rhs) {
+define i16 @atomicrmw_min_i16(ptr %ptr, i16 %rhs) {
   ; CHECK-LABEL: name: atomicrmw_min_i16
   ; CHECK: bb.0 (%ir-block.0):
   ; CHECK-NEXT:   successors: %bb.1(0x80000000)
@@ -1154,11 +1154,11 @@ define i16 @atomicrmw_min_i16(i16* %ptr, i16 %rhs) {
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   $w0 = ORRWrs $wzr, $w8, 0, implicit killed $x8
   ; CHECK-NEXT:   RET undef $lr, implicit $w0
-  %res = atomicrmw min i16* %ptr, i16 %rhs acquire, !pcsections !0
+  %res = atomicrmw min ptr %ptr, i16 %rhs acquire, !pcsections !0
   ret i16 %res
 }
 
-define i16 @atomicrmw_max_i16(i16* %ptr, i16 %rhs) {
+define i16 @atomicrmw_max_i16(ptr %ptr, i16 %rhs) {
   ; CHECK-LABEL: name: atomicrmw_max_i16
   ; CHECK: bb.0 (%ir-block.0):
   ; CHECK-NEXT:   successors: %bb.1(0x80000000)
@@ -1181,11 +1181,11 @@ define i16 @atomicrmw_max_i16(i16* %ptr, i16 %rhs) {
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   $w0 = ORRWrs $wzr, $w8, 0, implicit killed $x8
   ; CHECK-NEXT:   RET undef $lr, implicit $w0
-  %res = atomicrmw max i16* %ptr, i16 %rhs release, !pcsections !0
+  %res = atomicrmw max ptr %ptr, i16 %rhs release, !pcsections !0
   ret i16 %res
 }
 
-define i16 @atomicrmw_umin_i16(i16* %ptr, i16 %rhs) {
+define i16 @atomicrmw_umin_i16(ptr %ptr, i16 %rhs) {
   ; CHECK-LABEL: name: atomicrmw_umin_i16
   ; CHECK: bb.0 (%ir-block.0):
   ; CHECK-NEXT:   successors: %bb.1(0x80000000)
@@ -1209,11 +1209,11 @@ define i16 @atomicrmw_umin_i16(i16* %ptr, i16 %rhs) {
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   $w0 = ORRWrs $wzr, $w8, 0, implicit killed $x8
   ; CHECK-NEXT:   RET undef $lr, implicit $w0
-  %res = atomicrmw umin i16* %ptr, i16 %rhs seq_cst, !pcsections !0
+  %res = atomicrmw umin ptr %ptr, i16 %rhs seq_cst, !pcsections !0
   ret i16 %res
 }
 
-define i16 @atomicrmw_umax_i16(i16* %ptr, i16 %rhs) {
+define i16 @atomicrmw_umax_i16(ptr %ptr, i16 %rhs) {
   ; CHECK-LABEL: name: atomicrmw_umax_i16
   ; CHECK: bb.0 (%ir-block.0):
   ; CHECK-NEXT:   successors: %bb.1(0x80000000)
@@ -1237,11 +1237,11 @@ define i16 @atomicrmw_umax_i16(i16* %ptr, i16 %rhs) {
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   $w0 = ORRWrs $wzr, $w8, 0, implicit killed $x8
   ; CHECK-NEXT:   RET undef $lr, implicit $w0
-  %res = atomicrmw umax i16* %ptr, i16 %rhs monotonic, !pcsections !0
+  %res = atomicrmw umax ptr %ptr, i16 %rhs monotonic, !pcsections !0
   ret i16 %res
 }
 
-define { i8, i1 } @cmpxchg_i8(i8* %ptr, i8 %desired, i8 %new) {
+define { i8, i1 } @cmpxchg_i8(ptr %ptr, i8 %desired, i8 %new) {
   ; CHECK-LABEL: name: cmpxchg_i8
   ; CHECK: bb.0 (%ir-block.0):
   ; CHECK-NEXT:   successors: %bb.1(0x80000000)
@@ -1280,11 +1280,11 @@ define { i8, i1 } @cmpxchg_i8(i8* %ptr, i8 %desired, i8 %new) {
   ; CHECK-NEXT:   CLREX 15, pcsections !0
   ; CHECK-NEXT:   $w0 = KILL renamable $w0, implicit killed $x0
   ; CHECK-NEXT:   RET undef $lr, implicit $w0, implicit $w1
-  %res = cmpxchg i8* %ptr, i8 %desired, i8 %new monotonic monotonic, !pcsections !0
+  %res = cmpxchg ptr %ptr, i8 %desired, i8 %new monotonic monotonic, !pcsections !0
   ret { i8, i1 } %res
 }
 
-define { i16, i1 } @cmpxchg_i16(i16* %ptr, i16 %desired, i16 %new) {
+define { i16, i1 } @cmpxchg_i16(ptr %ptr, i16 %desired, i16 %new) {
   ; CHECK-LABEL: name: cmpxchg_i16
   ; CHECK: bb.0 (%ir-block.0):
   ; CHECK-NEXT:   successors: %bb.1(0x80000000)
@@ -1323,7 +1323,7 @@ define { i16, i1 } @cmpxchg_i16(i16* %ptr, i16 %desired, i16 %new) {
   ; CHECK-NEXT:   CLREX 15, pcsections !0
   ; CHECK-NEXT:   $w0 = KILL renamable $w0, implicit killed $x0
   ; CHECK-NEXT:   RET undef $lr, implicit $w0, implicit $w1
-  %res = cmpxchg i16* %ptr, i16 %desired, i16 %new monotonic monotonic, !pcsections !0
+  %res = cmpxchg ptr %ptr, i16 %desired, i16 %new monotonic monotonic, !pcsections !0
   ret { i16, i1 } %res
 }
 

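As elsewhere in this commit, the atomic orderings and the !pcsections metadata pass through the conversion untouched; only the spelling of the pointer operand changes, so none of the expanded-pseudo CHECK bodies needed regeneration. A representative converted line, assuming the !0 node the file already defines:

  %pair = cmpxchg ptr %p, i32 %cmp, i32 %new acquire acquire, !pcsections !0
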
diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/assert-align.ll b/llvm/test/CodeGen/AArch64/GlobalISel/assert-align.ll
index f9ce504c3d68..29654c3ba229 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/assert-align.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/assert-align.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -global-isel -mtriple=aarch64-linux-gnu -o - %s | FileCheck %s
 
-declare i8* @foo()
+declare ptr @foo()
 
 define void @call_assert_align() {
 ; CHECK-LABEL: call_assert_align:
@@ -13,16 +13,16 @@ define void @call_assert_align() {
 ; CHECK-NEXT:    strb wzr, [x0]
 ; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
-  %ptr = call align 8 i8* @foo()
-  store i8 0, i8* %ptr
+  %ptr = call align 8 ptr @foo()
+  store i8 0, ptr %ptr
   ret void
 }
 
-define i8* @tailcall_assert_align() {
+define ptr @tailcall_assert_align() {
 ; CHECK-LABEL: tailcall_assert_align:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    b foo
 entry:
-  %call = tail call align 4 i8* @foo()
-  ret i8* %call
+  %call = tail call align 4 ptr @foo()
+  ret ptr %call
 }

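The align here is a call-site return attribute, so it carries over to the opaque ptr unchanged; the alignment fact was never derived from the pointee type. A minimal sketch with a hypothetical callee:

  declare ptr @returns_aligned()             ; hypothetical, mirrors @foo above
  %p = call align 8 ptr @returns_aligned()   ; alignment attaches to the call site
  store i8 0, ptr %p
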
diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/builtin-return-address-pacret.ll b/llvm/test/CodeGen/AArch64/GlobalISel/builtin-return-address-pacret.ll
index 436662e3909e..dbb86b086aa5 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/builtin-return-address-pacret.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/builtin-return-address-pacret.ll
@@ -1,19 +1,19 @@
 ;; RUN: llc -mtriple aarch64               -global-isel -O0 %s -o - | FileCheck -enable-var-scope %s --check-prefixes=CHECK,CHECK-NOP
 ;; RUN: llc -mtriple aarch64 -mattr=+v8.3a -global-isel -O0 %s -o - | FileCheck -enable-var-scope %s --check-prefixes=CHECK,CHECK-V83
 declare void @g0() #1
-declare void @g1(i8*) #1
-declare void @g2(i32, i8*) #1
+declare void @g1(ptr) #1
+declare void @g2(i32, ptr) #1
 
-declare i8* @llvm.returnaddress(i32 immarg) #2
+declare ptr @llvm.returnaddress(i32 immarg) #2
 
-define i8* @f0() #0 {
+define ptr @f0() #0 {
 entry:
-  %0 = call i8* @llvm.returnaddress(i32 0)
-  call void @g1(i8* %0)
-  %1 = call i8* @llvm.returnaddress(i32 1)
-  call void @g2(i32 1, i8* %1)
-  %2 = call i8* @llvm.returnaddress(i32 2)
-  ret i8* %2
+  %0 = call ptr @llvm.returnaddress(i32 0)
+  call void @g1(ptr %0)
+  %1 = call ptr @llvm.returnaddress(i32 1)
+  call void @g2(i32 1, ptr %1)
+  %2 = call ptr @llvm.returnaddress(i32 2)
+  ret ptr %2
 }
 ;; CHECK-LABEL:    f0:
 ;; CHECK-NOT:      {{(mov|ldr)}} x30
@@ -35,14 +35,14 @@ entry:
 ;; CHECK-V83-NEXT: ldr x0, [x[[T1]], #8]
 ;; CHECK-V83-NEXT: xpaci x0
 
-define i8* @f1() #0 {
+define ptr @f1() #0 {
 entry:
-  %0 = call i8* @llvm.returnaddress(i32 1)
-  call void @g1(i8* %0)
-  %1 = call i8* @llvm.returnaddress(i32 2)
-  call void @g2(i32 1, i8* %1)
-  %2 = call i8* @llvm.returnaddress(i32 0)
-  ret i8* %2
+  %0 = call ptr @llvm.returnaddress(i32 1)
+  call void @g1(ptr %0)
+  %1 = call ptr @llvm.returnaddress(i32 2)
+  call void @g2(i32 1, ptr %1)
+  %2 = call ptr @llvm.returnaddress(i32 0)
+  ret ptr %2
 }
 ;; CHECK-LABEL:    f1:
 ;; CHECK-DAG:      ldr x[[T0:[0-9]+]], [x29]
@@ -71,11 +71,11 @@ entry:
 ;; CHECK-NOT:      x0
 ;; CHECK:          ret
 
-define i8* @f2() #0 {
+define ptr @f2() #0 {
 entry:
-  call void bitcast (void ()* @g0 to void ()*)()
-  %0 = call i8* @llvm.returnaddress(i32 0)
-  ret i8* %0
+  call void @g0()
+  %0 = call ptr @llvm.returnaddress(i32 0)
+  ret ptr %0
 }
 ;; CHECK-LABEL:    f2
 ;; CHECK:          bl g0
@@ -88,10 +88,10 @@ entry:
 ;; CHECK-NOT:      x0
 ;; CHECK:          ret
 
-define i8* @f3() #0 {
+define ptr @f3() #0 {
 entry:
-  %0 = call i8* @llvm.returnaddress(i32 0)
-  ret i8* %0
+  %0 = call ptr @llvm.returnaddress(i32 0)
+  ret ptr %0
 }
 ;; CHECK-LABEL:    f3:
 ;; CHECK-NOP:      str x30, [sp,

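Note the change in f2: the typed-pointer version called through a degenerate constant expression, bitcast (void ()* @g0 to void ()*), which cast a function pointer to its own type. Opaque pointers fold every pointer-to-pointer bitcast constant away, leaving the direct call:

  call void @g0()
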
diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/byval-call.ll b/llvm/test/CodeGen/AArch64/GlobalISel/byval-call.ll
index 85dfab130ae3..9e09282767bd 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/byval-call.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/byval-call.ll
@@ -1,9 +1,9 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -global-isel -mtriple=aarch64-unknown-linux-gnu < %s | FileCheck %s
 
-declare void @byval_i32(i32* byval(i32) %ptr)
+declare void @byval_i32(ptr byval(i32) %ptr)
 
-define void @call_byval_i32(i32* %incoming) uwtable {
+define void @call_byval_i32(ptr %incoming) uwtable {
 ; CHECK-LABEL: call_byval_i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #32
@@ -18,13 +18,13 @@ define void @call_byval_i32(i32* %incoming) uwtable {
 ; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    .cfi_restore w30
 ; CHECK-NEXT:    ret
-  call void @byval_i32(i32* byval(i32) %incoming)
+  call void @byval_i32(ptr byval(i32) %incoming)
   ret void
 }
 
-declare void @byval_a64i32([64 x i32]* byval([64 x i32]) %ptr)
+declare void @byval_a64i32(ptr byval([64 x i32]) %ptr)
 
-define void @call_byval_a64i32([64 x i32]* %incoming) uwtable {
+define void @call_byval_a64i32(ptr %incoming) uwtable {
 ; CHECK-LABEL: call_byval_a64i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #288
@@ -78,6 +78,6 @@ define void @call_byval_a64i32([64 x i32]* %incoming) uwtable {
 ; CHECK-NEXT:    .cfi_restore w30
 ; CHECK-NEXT:    .cfi_restore w29
 ; CHECK-NEXT:    ret
-  call void @byval_a64i32([64 x i32]* byval([64 x i32]) %incoming)
+  call void @byval_a64i32(ptr byval([64 x i32]) %incoming)
   ret void
 }

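byval is one of the attributes (along with sret, inalloca, and preallocated) that carries an explicit type precisely so this information survives opaque pointers: the ABI must know how many bytes to copy for the by-value argument, and with a bare ptr that can no longer be read off the pointer type. The pointee type therefore moves into the attribute rather than disappearing:

  declare void @byval_a64i32(ptr byval([64 x i32]) %ptr)
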
diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/call-lowering-const-bitcast-func.ll b/llvm/test/CodeGen/AArch64/GlobalISel/call-lowering-const-bitcast-func.ll
index 07a6297ac76e..ff813e54fa9d 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/call-lowering-const-bitcast-func.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/call-lowering-const-bitcast-func.ll
@@ -3,12 +3,12 @@
 target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
 target triple = "aarch64-darwin-ios13.0"
 
-declare i8* @objc_msgSend(i8*, i8*, ...)
+declare ptr @objc_msgSend(ptr, ptr, ...)
 define void @call_bitcast_ptr_const() {
 ; CHECK-LABEL: @call_bitcast_ptr_const
 ; CHECK: bl _objc_msgSend
 ; CHECK-NOT: blr
 entry:
-  call void bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to void (i8*, i8*, [2 x i32], i32, float)*)(i8* undef, i8* undef, [2 x i32] zeroinitializer, i32 0, float 1.000000e+00)
+  call void @objc_msgSend(ptr undef, ptr undef, [2 x i32] zeroinitializer, i32 0, float 1.000000e+00)
   ret void
 }

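With typed pointers this call was written through a constant-expression bitcast of @objc_msgSend to an unrelated function pointer type. With opaque pointers the cast folds away and the signature mismatch is expressed directly: the call site's own argument and return types drive the lowering, independent of the declaration:

  call void @objc_msgSend(ptr undef, ptr undef, [2 x i32] zeroinitializer, i32 0, float 1.000000e+00)
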
diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/call-translator-cse.ll b/llvm/test/CodeGen/AArch64/GlobalISel/call-translator-cse.ll
index aebf7f27f318..4aac649f5547 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/call-translator-cse.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/call-translator-cse.ll
@@ -13,14 +13,14 @@
 ; CHECK: G_STORE [[LO]](s64), [[GEP2]](p0) :: (store (s64) into stack, align 1)
 ; CHECK: [[GEP3:%[0-9]+]]:_(p0) = G_PTR_ADD [[SP]], [[CST]](s64)
 ; CHECK: G_STORE [[HI]](s64), [[GEP3]](p0) :: (store (s64) into stack + 8, align 1)
-define void @test_split_struct([2 x i64]* %ptr) {
-  %struct = load [2 x i64], [2 x i64]* %ptr
-  call void @take_split_struct([2 x i64]* null, i64 1, i64 2, i64 3,
+define void @test_split_struct(ptr %ptr) {
+  %struct = load [2 x i64], ptr %ptr
+  call void @take_split_struct(ptr null, i64 1, i64 2, i64 3,
                                i64 4, i64 5, i64 6,
                                [2 x i64] %struct)
   ret void
 }
 
-declare void @take_split_struct([2 x i64]* %ptr, i64, i64, i64,
+declare void @take_split_struct(ptr %ptr, i64, i64, i64,
                                i64, i64, i64,
                                [2 x i64] %in) ;

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/call-translator-ios.ll b/llvm/test/CodeGen/AArch64/GlobalISel/call-translator-ios.ll
index 2506b7f32cb2..b10c887e09c2 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/call-translator-ios.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/call-translator-ios.ll
@@ -43,9 +43,9 @@ define void @test_call_stack() {
 ; CHECK: $x1 = COPY
 ; CHECK: $x2 = COPY
 ; CHECK: BL @take_128bit_struct
-define void @test_128bit_struct([2 x i64]* %ptr) {
-  %struct = load [2 x i64], [2 x i64]* %ptr
-  call void @take_128bit_struct([2 x i64]* null, [2 x i64] %struct)
+define void @test_128bit_struct(ptr %ptr) {
+  %struct = load [2 x i64], ptr %ptr
+  call void @take_128bit_struct(ptr null, [2 x i64] %struct)
   ret void
 }
 
@@ -53,8 +53,8 @@ define void @test_128bit_struct([2 x i64]* %ptr) {
 ; CHECK: {{%.*}}:_(p0) = COPY $x0
 ; CHECK: {{%.*}}:_(s64) = COPY $x1
 ; CHECK: {{%.*}}:_(s64) = COPY $x2
-define void @take_128bit_struct([2 x i64]* %ptr, [2 x i64] %in) {
-  store [2 x i64] %in, [2 x i64]* %ptr
+define void @take_128bit_struct(ptr %ptr, [2 x i64] %in) {
+  store [2 x i64] %in, ptr %ptr
   ret void
 }
 
@@ -71,9 +71,9 @@ define void @take_128bit_struct([2 x i64]* %ptr, [2 x i64] %in) {
 
 ; CHECK: [[ADDR:%[0-9]+]]:_(p0) = G_PTR_ADD [[SP]], [[CST]]
 ; CHECK: G_STORE [[LD2]](s64), [[ADDR]](p0) :: (store (s64) into stack + 8, align 1)
-define void @test_split_struct([2 x i64]* %ptr) {
-  %struct = load [2 x i64], [2 x i64]* %ptr
-  call void @take_split_struct([2 x i64]* null, i64 1, i64 2, i64 3,
+define void @test_split_struct(ptr %ptr) {
+  %struct = load [2 x i64], ptr %ptr
+  call void @take_split_struct(ptr null, i64 1, i64 2, i64 3,
                                i64 4, i64 5, i64 6,
                                [2 x i64] %struct)
   ret void
@@ -89,9 +89,9 @@ define void @test_split_struct([2 x i64]* %ptr) {
 
 ; CHECK: [[HIPTR:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.[[HI_FRAME]]
 ; CHECK: [[HI:%[0-9]+]]:_(s64) = G_LOAD [[HIPTR]](p0) :: (invariant load (s64) from %fixed-stack.[[HI_FRAME]])
-define void @take_split_struct([2 x i64]* %ptr, i64, i64, i64,
+define void @take_split_struct(ptr %ptr, i64, i64, i64,
                                i64, i64, i64,
                                [2 x i64] %in) {
-  store [2 x i64] %in, [2 x i64]* %ptr
+  store [2 x i64] %in, ptr %ptr
   ret void
 }

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/call-translator-tail-call-sret.ll b/llvm/test/CodeGen/AArch64/GlobalISel/call-translator-tail-call-sret.ll
index cb8c31be256e..ecd7c3ca71be 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/call-translator-tail-call-sret.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/call-translator-tail-call-sret.ll
@@ -2,22 +2,22 @@
 ; RUN: llc < %s -mtriple arm64-apple-darwin -global-isel -stop-after=irtranslator -verify-machineinstrs | FileCheck %s
 
 ; Check that we don't try to tail-call with a non-forwarded sret parameter.
-declare void @test_explicit_sret(i64* sret(i64))
+declare void @test_explicit_sret(ptr sret(i64))
 
 ; Forwarded explicit sret pointer => we can tail call.
-define void @can_tail_call_forwarded_explicit_sret_ptr(i64* sret(i64) %arg) {
+define void @can_tail_call_forwarded_explicit_sret_ptr(ptr sret(i64) %arg) {
   ; CHECK-LABEL: name: can_tail_call_forwarded_explicit_sret_ptr
   ; CHECK: bb.1 (%ir-block.0):
   ; CHECK:   liveins: $x8
   ; CHECK:   [[COPY:%[0-9]+]]:_(p0) = COPY $x8
   ; CHECK:   $x8 = COPY [[COPY]](p0)
   ; CHECK:   TCRETURNdi @test_explicit_sret, 0, csr_darwin_aarch64_aapcs, implicit $sp, implicit $x8
-  tail call void @test_explicit_sret(i64* %arg)
+  tail call void @test_explicit_sret(ptr %arg)
   ret void
 }
 
 ; Not marked as tail, so don't tail call.
-define void @test_call_explicit_sret(i64* sret(i64) %arg) {
+define void @test_call_explicit_sret(ptr sret(i64) %arg) {
   ; CHECK-LABEL: name: test_call_explicit_sret
   ; CHECK: bb.1 (%ir-block.0):
   ; CHECK:   liveins: $x8
@@ -27,7 +27,7 @@ define void @test_call_explicit_sret(i64* sret(i64) %arg) {
   ; CHECK:   BL @test_explicit_sret, csr_darwin_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $x8
   ; CHECK:   ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
   ; CHECK:   RET_ReallyLR
-  call void @test_explicit_sret(i64* %arg)
+  call void @test_explicit_sret(ptr %arg)
   ret void
 }
 
@@ -41,11 +41,11 @@ define void @dont_tail_call_explicit_sret_alloca_unused() {
   ; CHECK:   ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
   ; CHECK:   RET_ReallyLR
   %l = alloca i64, align 8
-  tail call void @test_explicit_sret(i64* %l)
+  tail call void @test_explicit_sret(ptr %l)
   ret void
 }
 
-define void @dont_tail_call_explicit_sret_alloca_dummyusers(i64* %ptr) {
+define void @dont_tail_call_explicit_sret_alloca_dummyusers(ptr %ptr) {
   ; CHECK-LABEL: name: dont_tail_call_explicit_sret_alloca_dummyusers
   ; CHECK: bb.1 (%ir-block.0):
   ; CHECK:   liveins: $x0
@@ -59,13 +59,13 @@ define void @dont_tail_call_explicit_sret_alloca_dummyusers(i64* %ptr) {
   ; CHECK:   ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
   ; CHECK:   RET_ReallyLR
   %l = alloca i64, align 8
-  %r = load i64, i64* %ptr, align 8
-  store i64 %r, i64* %l, align 8
-  tail call void @test_explicit_sret(i64* %l)
+  %r = load i64, ptr %ptr, align 8
+  store i64 %r, ptr %l, align 8
+  tail call void @test_explicit_sret(ptr %l)
   ret void
 }
 
-define void @dont_tail_call_tailcall_explicit_sret_gep(i64* %ptr) {
+define void @dont_tail_call_tailcall_explicit_sret_gep(ptr %ptr) {
   ; CHECK-LABEL: name: dont_tail_call_tailcall_explicit_sret_gep
   ; CHECK: bb.1 (%ir-block.0):
   ; CHECK:   liveins: $x0
@@ -77,8 +77,8 @@ define void @dont_tail_call_tailcall_explicit_sret_gep(i64* %ptr) {
   ; CHECK:   BL @test_explicit_sret, csr_darwin_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $x8
   ; CHECK:   ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
   ; CHECK:   RET_ReallyLR
-  %ptr2 = getelementptr i64, i64* %ptr, i32 1
-  tail call void @test_explicit_sret(i64* %ptr2)
+  %ptr2 = getelementptr i64, ptr %ptr, i32 1
+  tail call void @test_explicit_sret(ptr %ptr2)
   ret void
 }
 
@@ -94,7 +94,7 @@ define i64 @dont_tail_call_sret_alloca_returned() {
   ; CHECK:   $x0 = COPY [[LOAD]](s64)
   ; CHECK:   RET_ReallyLR implicit $x0
   %l = alloca i64, align 8
-  tail call void @test_explicit_sret(i64* %l)
-  %r = load i64, i64* %l, align 8
+  tail call void @test_explicit_sret(ptr %l)
+  %r = load i64, ptr %l, align 8
   ret i64 %r
 }

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/call-translator-tail-call.ll b/llvm/test/CodeGen/AArch64/GlobalISel/call-translator-tail-call.ll
index 6360538a2822..d6a80bc81040 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/call-translator-tail-call.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/call-translator-tail-call.ll
@@ -16,7 +16,7 @@ define void @tail_call() {
 
 ; We should get a TCRETURNri here.
 ; FIXME: We don't need the COPY.
-define void @indirect_tail_call(void()* %func) {
+define void @indirect_tail_call(ptr %func) {
   ; DARWIN-LABEL: name: indirect_tail_call
   ; DARWIN: bb.1 (%ir-block.0):
   ; DARWIN-NEXT:   liveins: $x0
@@ -313,7 +313,7 @@ define void @test_bad_call_conv() {
 }
 
 ; Shouldn't tail call when the caller has byval arguments.
-define void @test_byval(i8* byval(i8) %ptr) {
+define void @test_byval(ptr byval(i8) %ptr) {
   ; DARWIN-LABEL: name: test_byval
   ; DARWIN: bb.1 (%ir-block.0):
   ; DARWIN-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
@@ -335,7 +335,7 @@ define void @test_byval(i8* byval(i8) %ptr) {
 }
 
 ; Shouldn't tail call when the caller has inreg arguments.
-define void @test_inreg(i8* inreg %ptr) {
+define void @test_inreg(ptr inreg %ptr) {
   ; DARWIN-LABEL: name: test_inreg
   ; DARWIN: bb.1 (%ir-block.0):
   ; DARWIN-NEXT:   liveins: $x0
@@ -386,8 +386,8 @@ entry:
   ret void
 }
 
-declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture)
-declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture)
+declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
 define void @test_lifetime() local_unnamed_addr {
   ; DARWIN-LABEL: name: test_lifetime
   ; DARWIN: bb.1.entry:
@@ -401,18 +401,18 @@ define void @test_lifetime() local_unnamed_addr {
   ; WINDOWS-NEXT:   TCRETURNdi @nonvoid_ret, 0, csr_aarch64_aapcs, implicit $sp
 entry:
   %t = alloca i8, align 1
-  call void @llvm.lifetime.start.p0i8(i64 1, i8* %t)
+  call void @llvm.lifetime.start.p0(i64 1, ptr %t)
   %x = tail call i32 @nonvoid_ret()
   %y = icmp ne i32 %x, 0
-  tail call void @llvm.lifetime.end.p0i8(i64 1, i8* %t)
+  tail call void @llvm.lifetime.end.p0(i64 1, ptr %t)
   ret void
 }
 
 ; We can tail call when the callee swiftself is the same as the caller one.
 ; It would be nice to move this to swiftself.ll, but it's important to verify
 ; that we get the COPY that makes this safe in the first place.
-declare i8* @pluto()
-define hidden swiftcc i64 @swiftself_indirect_tail(i64* swiftself %arg) {
+declare ptr @pluto()
+define hidden swiftcc i64 @swiftself_indirect_tail(ptr swiftself %arg) {
   ; DARWIN-LABEL: name: swiftself_indirect_tail
   ; DARWIN: bb.1 (%ir-block.0):
   ; DARWIN-NEXT:   liveins: $x20
@@ -435,15 +435,14 @@ define hidden swiftcc i64 @swiftself_indirect_tail(i64* swiftself %arg) {
   ; WINDOWS-NEXT:   ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
   ; WINDOWS-NEXT:   $x20 = COPY [[COPY]](p0)
   ; WINDOWS-NEXT:   TCRETURNri [[COPY1]](p0), 0, csr_aarch64_aapcs, implicit $sp, implicit $x20
-  %tmp = call i8* @pluto()
-  %tmp1 = bitcast i8* %tmp to i64 (i64*)*
-  %tmp2 = tail call swiftcc i64 %tmp1(i64* swiftself %arg)
+  %tmp = call ptr @pluto()
+  %tmp2 = tail call swiftcc i64 %tmp(ptr swiftself %arg)
   ret i64 %tmp2
 }
 
 ; Verify that we can tail call musttail callees.
-declare void @must_callee(i8*)
-define void @foo(i32*) {
+declare void @must_callee(ptr)
+define void @foo(ptr) {
   ; DARWIN-LABEL: name: foo
   ; DARWIN: bb.1 (%ir-block.1):
   ; DARWIN-NEXT:   liveins: $x0
@@ -460,7 +459,7 @@ define void @foo(i32*) {
   ; WINDOWS-NEXT:   [[C:%[0-9]+]]:_(p0) = G_CONSTANT i64 0
   ; WINDOWS-NEXT:   $x0 = COPY [[C]](p0)
   ; WINDOWS-NEXT:   TCRETURNdi @must_callee, 0, csr_aarch64_aapcs, implicit $sp, implicit $x0
-  musttail call void @must_callee(i8* null)
+  musttail call void @must_callee(ptr null)
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/call-translator-variadic-musttail.ll b/llvm/test/CodeGen/AArch64/GlobalISel/call-translator-variadic-musttail.ll
index 2339157f5736..a46df8009016 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/call-translator-variadic-musttail.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/call-translator-variadic-musttail.ll
@@ -26,7 +26,7 @@ define [2 x i64] @test_musttail_variadic_aggret(i32 %arg0, ...) {
 ; Test musttailing with a normal call in the block. Test that we spill and
 ; restore, as a normal call will clobber all argument registers.
 @asdf = internal constant [4 x i8] c"asdf"
-declare void @puts(i8*)
+declare void @puts(ptr)
 define i32 @test_musttail_variadic_spill(i32 %arg0, ...) {
 ; CHECK-LABEL: test_musttail_variadic_spill:
 ; CHECK:       ; %bb.0:
@@ -90,16 +90,16 @@ define i32 @test_musttail_variadic_spill(i32 %arg0, ...) {
 ; CHECK-NEXT:    add sp, sp, #224
 ; CHECK-NEXT:    b _musttail_variadic_callee
 ; CHECK-NEXT:    .loh AdrpAdd Lloh0, Lloh1
-  call void @puts(i8* getelementptr ([4 x i8], [4 x i8]* @asdf, i32 0, i32 0))
+  call void @puts(ptr @asdf)
   %r = musttail call i32 (i32, ...) @musttail_variadic_callee(i32 %arg0, ...)
   ret i32 %r
 }
 
 ; Test musttailing with a varargs call in the block. Test that we spill and
 ; reload all arguments in the variadic argument pack.
-declare void @llvm.va_start(i8*) nounwind
-declare void(i8*, ...)* @get_f(i8* %this)
-define void @f_thunk(i8* %this, ...) {
+declare void @llvm.va_start(ptr) nounwind
+declare ptr @get_f(ptr %this)
+define void @f_thunk(ptr %this, ...) {
 ; CHECK-LABEL: f_thunk:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #256
@@ -161,29 +161,27 @@ define void @f_thunk(i8* %this, ...) {
 ; CHECK-NEXT:    ldp x28, x27, [sp, #160] ; 16-byte Folded Reload
 ; CHECK-NEXT:    add sp, sp, #256
 ; CHECK-NEXT:    br x9
-  %ap = alloca [4 x i8*], align 16
-  %ap_i8 = bitcast [4 x i8*]* %ap to i8*
-  call void @llvm.va_start(i8* %ap_i8)
-  %fptr = call void(i8*, ...)*(i8*) @get_f(i8* %this)
-  musttail call void (i8*, ...) %fptr(i8* %this, ...)
+  %ap = alloca [4 x ptr], align 16
+  call void @llvm.va_start(ptr %ap)
+  %fptr = call ptr(ptr) @get_f(ptr %this)
+  musttail call void (ptr, ...) %fptr(ptr %this, ...)
   ret void
 }
 
 ; We don't need any spills and reloads here, but we should still emit the
 ; copies in call lowering.
-define void @g_thunk(i8* %fptr_i8, ...) {
+define void @g_thunk(ptr %fptr_i8, ...) {
 ; CHECK-LABEL: g_thunk:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    br x0
-  %fptr = bitcast i8* %fptr_i8 to void (i8*, ...)*
-  musttail call void (i8*, ...) %fptr(i8* %fptr_i8, ...)
+  musttail call void (ptr, ...) %fptr_i8(ptr %fptr_i8, ...)
   ret void
 }
 
 ; Test that this works with multiple exits and basic blocks.
-%struct.Foo = type { i1, i8*, i8* }
+%struct.Foo = type { i1, ptr, ptr }
 @g = external global i32
-define void @h_thunk(%struct.Foo* %this, ...) {
+define void @h_thunk(ptr %this, ...) {
 ; CHECK-LABEL: h_thunk:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ldrb w9, [x0]
@@ -202,22 +200,19 @@ define void @h_thunk(%struct.Foo* %this, ...) {
 ; CHECK-NEXT:    str w11, [x10]
 ; CHECK-NEXT:    br x9
 ; CHECK-NEXT:    .loh AdrpLdrGotStr Lloh2, Lloh3, Lloh4
-  %cond_p = getelementptr %struct.Foo, %struct.Foo* %this, i32 0, i32 0
-  %cond = load i1, i1* %cond_p
+  %cond = load i1, ptr %this
   br i1 %cond, label %then, label %else
 
 then:
-  %a_p = getelementptr %struct.Foo, %struct.Foo* %this, i32 0, i32 1
-  %a_i8 = load i8*, i8** %a_p
-  %a = bitcast i8* %a_i8 to void (%struct.Foo*, ...)*
-  musttail call void (%struct.Foo*, ...) %a(%struct.Foo* %this, ...)
+  %a_p = getelementptr %struct.Foo, ptr %this, i32 0, i32 1
+  %a_i8 = load ptr, ptr %a_p
+  musttail call void (ptr, ...) %a_i8(ptr %this, ...)
   ret void
 
 else:
-  %b_p = getelementptr %struct.Foo, %struct.Foo* %this, i32 0, i32 2
-  %b_i8 = load i8*, i8** %b_p
-  %b = bitcast i8* %b_i8 to void (%struct.Foo*, ...)*
-  store i32 42, i32* @g
-  musttail call void (%struct.Foo*, ...) %b(%struct.Foo* %this, ...)
+  %b_p = getelementptr %struct.Foo, ptr %this, i32 0, i32 2
+  %b_i8 = load ptr, ptr %b_p
+  store i32 42, ptr @g
+  musttail call void (ptr, ...) %b_i8(ptr %this, ...)
   ret void
 }
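
The thunk rewrites above follow the same rule for indirect calls: a loaded ptr no longer needs a bitcast to the matching function-pointer type before being called, because the function type is spelled on the call itself. The g_thunk case reduces to a minimal sketch:

  define void @g_thunk(ptr %fptr, ...) {
    ; no bitcast of %fptr needed; the call carries the function type
    musttail call void (ptr, ...) %fptr(ptr %fptr, ...)
    ret void
  }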

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/call-translator.ll b/llvm/test/CodeGen/AArch64/GlobalISel/call-translator.ll
index 48939351ac04..d1d6928749f2 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/call-translator.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/call-translator.ll
@@ -39,7 +39,7 @@ define void @test_simple_arg(i32 %in) {
 ; CHECK: %[[FUNC]]:gpr64(p0) = COPY $x0
 ; CHECK: BLR %[[FUNC]](p0), csr_aarch64_aapcs, implicit-def $lr, implicit $sp
 ; CHECK: RET_ReallyLR
-define void @test_indirect_call(void()* %func) {
+define void @test_indirect_call(ptr %func) {
   call void %func()
   ret void
 }
@@ -73,8 +73,8 @@ define void @test_multiple_args(i64 %in) {
 ; CHECK: [[GEP2:%[0-9]+]]:_(p0) = G_PTR_ADD [[ADDR]], [[CST2]](s64)
 ; CHECK: G_STORE [[I8]](s8), [[GEP2]](p0) :: (store (s8) into %ir.addr + 16, align 8)
 ; CHECK: RET_ReallyLR
-define void @test_struct_formal({double, i64, i8} %in, {double, i64, i8}* %addr) {
-  store {double, i64, i8} %in, {double, i64, i8}* %addr
+define void @test_struct_formal({double, i64, i8} %in, ptr %addr) {
+  store {double, i64, i8} %in, ptr %addr
   ret void
 }
 
@@ -94,8 +94,8 @@ define void @test_struct_formal({double, i64, i8} %in, {double, i64, i8}* %addr)
 ; CHECK: $x0 = COPY [[LD2]](s64)
 ; CHECK: $w1 = COPY [[LD3]](s32)
 ; CHECK: RET_ReallyLR implicit $d0, implicit $x0, implicit $w1
-define {double, i64, i32} @test_struct_return({double, i64, i32}* %addr) {
-  %val = load {double, i64, i32}, {double, i64, i32}* %addr
+define {double, i64, i32} @test_struct_return(ptr %addr) {
+  %val = load {double, i64, i32}, ptr %addr
   ret {double, i64, i32} %val
 }
 
@@ -123,8 +123,8 @@ define {double, i64, i32} @test_struct_return({double, i64, i32}* %addr) {
 ; CHECK: [[E3:%[0-9]+]]:_(s64) = COPY $x3
 ; CHECK: $x0 = COPY [[E1]]
 declare [4 x i64] @arr_callee([4 x i64])
-define i64 @test_arr_call([4 x i64]* %addr) {
-  %arg = load [4 x i64], [4 x i64]* %addr
+define i64 @test_arr_call(ptr %addr) {
+  %arg = load [4 x i64], ptr %addr
   %res = call [4 x i64] @arr_callee([4 x i64] %arg)
   %val = extractvalue [4 x i64] %res, 1
   ret i64 %val
@@ -143,8 +143,8 @@ define i64 @test_arr_call([4 x i64]* %addr) {
 ; CHECK: $w0 = COPY [[ZVAL]](s32)
 ; CHECK: BL @take_char, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $w0
 declare void @take_char(i8)
-define void @test_abi_exts_call(i8* %addr) {
-  %val = load i8, i8* %addr
+define void @test_abi_exts_call(ptr %addr) {
+  %val = load i8, ptr %addr
   call void @take_char(i8 %val)
   call void @take_char(i8 signext %val)
   call void @take_char(i8 zeroext %val)
@@ -163,8 +163,8 @@ define void @test_abi_exts_call(i8* %addr) {
 ; CHECK:   ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
 ; CHECK:   RET_ReallyLR
 declare void @has_zext_param(i8 zeroext)
-define void @test_zext_in_callee(i8* %addr) {
-  %val = load i8, i8* %addr
+define void @test_zext_in_callee(ptr %addr) {
+  %val = load i8, ptr %addr
   call void @has_zext_param(i8 %val)
   ret void
 }
@@ -181,8 +181,8 @@ define void @test_zext_in_callee(i8* %addr) {
 ; CHECK:   ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
 ; CHECK:   RET_ReallyLR
 declare void @has_sext_param(i8 signext)
-define void @test_sext_in_callee(i8* %addr) {
-  %val = load i8, i8* %addr
+define void @test_sext_in_callee(ptr %addr) {
+  %val = load i8, ptr %addr
   call void @has_sext_param(i8 %val)
   ret void
 }
@@ -192,8 +192,8 @@ define void @test_sext_in_callee(i8* %addr) {
 ; CHECK: [[SVAL:%[0-9]+]]:_(s32) = G_SEXT [[VAL]](s8)
 ; CHECK: $w0 = COPY [[SVAL]](s32)
 ; CHECK: RET_ReallyLR implicit $w0
-define signext i8 @test_abi_sext_ret(i8* %addr) {
-  %val = load i8, i8* %addr
+define signext i8 @test_abi_sext_ret(ptr %addr) {
+  %val = load i8, ptr %addr
   ret i8 %val
 }
 
@@ -202,8 +202,8 @@ define signext i8 @test_abi_sext_ret(i8* %addr) {
 ; CHECK: [[SVAL:%[0-9]+]]:_(s32) = G_ZEXT [[VAL]](s8)
 ; CHECK: $w0 = COPY [[SVAL]](s32)
 ; CHECK: RET_ReallyLR implicit $w0
-define zeroext i8 @test_abi_zext_ret(i8* %addr) {
-  %val = load i8, i8* %addr
+define zeroext i8 @test_abi_zext_ret(ptr %addr) {
+  %val = load i8, ptr %addr
   ret i8 %val
 }
 
@@ -220,9 +220,9 @@ define zeroext i8 @test_abi_zext_ret(i8* %addr) {
 ; CHECK: [[ADDR:%[0-9]+]]:_(p0) = G_LOAD [[ADDR_ADDR]](p0) :: (invariant load (p0) from %fixed-stack.[[STACK16]], align 16)
 ; CHECK: [[SUM:%[0-9]+]]:_(s64) = G_ADD [[LHS]], [[RHS]]
 ; CHECK: G_STORE [[SUM]](s64), [[ADDR]](p0)
-define void @test_stack_slots([8 x i64], i64 %lhs, i64 %rhs, i64* %addr) {
+define void @test_stack_slots([8 x i64], i64 %lhs, i64 %rhs, ptr %addr) {
   %sum = add i64 %lhs, %rhs
-  store i64 %sum, i64* %addr
+  store i64 %sum, ptr %addr
   ret void
 }
 
@@ -244,7 +244,7 @@ define void @test_stack_slots([8 x i64], i64 %lhs, i64 %rhs, i64* %addr) {
 ; CHECK: BL @test_stack_slots
 ; CHECK: ADJCALLSTACKUP 24, 0, implicit-def $sp, implicit $sp
 define void @test_call_stack() {
-  call void @test_stack_slots([8 x i64] undef, i64 42, i64 12, i64* null)
+  call void @test_stack_slots([8 x i64] undef, i64 42, i64 12, ptr null)
   ret void
 }
 
@@ -267,9 +267,9 @@ define void @test_mem_i1([8 x i64], i1 %in) {
 ; CHECK: $x1 = COPY
 ; CHECK: $x2 = COPY
 ; CHECK: BL @take_128bit_struct
-define void @test_128bit_struct([2 x i64]* %ptr) {
-  %struct = load [2 x i64], [2 x i64]* %ptr
-  call void @take_128bit_struct([2 x i64]* null, [2 x i64] %struct)
+define void @test_128bit_struct(ptr %ptr) {
+  %struct = load [2 x i64], ptr %ptr
+  call void @take_128bit_struct(ptr null, [2 x i64] %struct)
   ret void
 }
 
@@ -277,8 +277,8 @@ define void @test_128bit_struct([2 x i64]* %ptr) {
 ; CHECK: {{%.*}}:_(p0) = COPY $x0
 ; CHECK: {{%.*}}:_(s64) = COPY $x1
 ; CHECK: {{%.*}}:_(s64) = COPY $x2
-define void @take_128bit_struct([2 x i64]* %ptr, [2 x i64] %in) {
-  store [2 x i64] %in, [2 x i64]* %ptr
+define void @take_128bit_struct(ptr %ptr, [2 x i64] %in) {
+  store [2 x i64] %in, ptr %ptr
   ret void
 }
 
@@ -295,9 +295,9 @@ define void @take_128bit_struct([2 x i64]* %ptr, [2 x i64] %in) {
 ; CHECK: G_STORE [[LO]](s64), [[GEP2]](p0) :: (store (s64) into stack, align 1)
 ; CHECK: [[GEP3:%[0-9]+]]:_(p0) = G_PTR_ADD [[SP]], [[CST]](s64)
 ; CHECK: G_STORE [[HI]](s64), [[GEP3]](p0) :: (store (s64) into stack + 8, align 1)
-define void @test_split_struct([2 x i64]* %ptr) {
-  %struct = load [2 x i64], [2 x i64]* %ptr
-  call void @take_split_struct([2 x i64]* null, i64 1, i64 2, i64 3,
+define void @test_split_struct(ptr %ptr) {
+  %struct = load [2 x i64], ptr %ptr
+  call void @take_split_struct(ptr null, i64 1, i64 2, i64 3,
                                i64 4, i64 5, i64 6,
                                [2 x i64] %struct)
   ret void
@@ -313,10 +313,10 @@ define void @test_split_struct([2 x i64]* %ptr) {
 
 ; CHECK: [[HIPTR:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.[[HI_FRAME]]
 ; CHECK: [[HI:%[0-9]+]]:_(s64) = G_LOAD [[HIPTR]](p0) :: (invariant load (s64) from %fixed-stack.[[HI_FRAME]])
-define void @take_split_struct([2 x i64]* %ptr, i64, i64, i64,
+define void @take_split_struct(ptr %ptr, i64, i64, i64,
                                i64, i64, i64,
                                [2 x i64] %in) {
-  store [2 x i64] %in, [2 x i64]* %ptr
+  store [2 x i64] %in, ptr %ptr
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/combine-shift-of-shifted-dbg-value-fallback.ll b/llvm/test/CodeGen/AArch64/GlobalISel/combine-shift-of-shifted-dbg-value-fallback.ll
index 5ea39d94bf6b..9f3eb0945c84 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/combine-shift-of-shifted-dbg-value-fallback.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/combine-shift-of-shifted-dbg-value-fallback.ll
@@ -5,7 +5,7 @@ target triple = "arm64-apple-ios9.0.0"
 
 ; Check we don't fall back due to hitting a DBG_VALUE with a deleted vreg.
 
-%0 = type { %1, %3, %5, %8, i8, i32, i8, i64, [4096 x %9], i64, i64, [4096 x %11], i64, i64, %13, %21, i8*, %35, i64, [504 x i8] }
+%0 = type { %1, %3, %5, %8, i8, i32, i8, i64, [4096 x %9], i64, i64, [4096 x %11], i64, i64, %13, %21, ptr, %35, i64, [504 x i8] }
 %1 = type { [32 x %2] }
 %2 = type { i32, i32 }
 %3 = type { [32 x %4] }
@@ -14,14 +14,14 @@ target triple = "arm64-apple-ios9.0.0"
 %6 = type { %7, %7 }
 %7 = type { i8, [64 x i8] }
 %8 = type { [1024 x %7], %7 }
-%9 = type { %10*, i64 }
-%10 = type { i8*, i8*, i8, i8 }
-%11 = type { %12*, %12* }
+%9 = type { ptr, i64 }
+%10 = type { ptr, ptr, i8, i8 }
+%11 = type { ptr, ptr }
 %12 = type { i64, i64 }
 %13 = type { %14 }
-%14 = type { %15*, %17, %19 }
-%15 = type { %16* }
-%16 = type <{ %15, %16*, %15*, i8, [7 x i8] }>
+%14 = type { ptr, %17, %19 }
+%15 = type { ptr }
+%16 = type <{ %15, ptr, ptr, i8, [7 x i8] }>
 %17 = type { %18 }
 %18 = type { %15 }
 %19 = type { %20 }
@@ -30,8 +30,8 @@ target triple = "arm64-apple-ios9.0.0"
 %22 = type <{ %23, %30, %32, %33, [4 x i8] }>
 %23 = type { %24 }
 %24 = type { %25, %27 }
-%25 = type { %26** }
-%26 = type { %26* }
+%25 = type { ptr }
+%26 = type { ptr }
 %27 = type { %28 }
 %28 = type { %29 }
 %29 = type { %20 }
@@ -44,7 +44,7 @@ target triple = "arm64-apple-ios9.0.0"
 
 @global = external hidden global %0, align 512
 
-define void @baz(i8* %arg) !dbg !6 {
+define void @baz(ptr %arg) !dbg !6 {
 ; CHECK-LABEL: baz:
 ; CHECK:       .Lfunc_begin0:
 ; CHECK-NEXT:    .file 1 "/" "tmp.ll"
@@ -63,12 +63,12 @@ define void @baz(i8* %arg) !dbg !6 {
 ; CHECK-NEXT:    ret
 ; CHECK-NEXT:  .Ltmp0:
 bb:
-  %tmp = ptrtoint i8* %arg to i64, !dbg !14
+  %tmp = ptrtoint ptr %arg to i64, !dbg !14
   %tmp1 = shl i64 %tmp, 1, !dbg !15
   %tmp2 = and i64 %tmp1, 1022, !dbg !16
   call void @llvm.dbg.value(metadata i64 %tmp2, metadata !12, metadata !DIExpression()), !dbg !16
-  %tmp3 = getelementptr inbounds %0, %0* @global, i64 0, i32 17, i32 0, i64 %tmp2, !dbg !17
-  store i64 0, i64* %tmp3, align 16, !dbg !18
+  %tmp3 = getelementptr inbounds %0, ptr @global, i64 0, i32 17, i32 0, i64 %tmp2, !dbg !17
+  store i64 0, ptr %tmp3, align 16, !dbg !18
   ret void, !dbg !19
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/combiner-load-store-indexing.ll b/llvm/test/CodeGen/AArch64/GlobalISel/combiner-load-store-indexing.ll
index 8f0b3b2dcf6a..05d0ef9551bb 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/combiner-load-store-indexing.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/combiner-load-store-indexing.ll
@@ -1,7 +1,7 @@
 ; RUN: llc -mtriple=arm64-apple-ios -global-isel -global-isel-abort=1 -verify-machineinstrs -stop-after=aarch64-prelegalizer-combiner -force-legal-indexing %s -o - | FileCheck %s
 ; RUN: llc -debugify-and-strip-all-safe -mtriple=arm64-apple-ios -global-isel -global-isel-abort=1 -verify-machineinstrs -stop-after=aarch64-prelegalizer-combiner -force-legal-indexing %s -o - | FileCheck %s
 
-define i8* @test_simple_load_pre(i8* %ptr) {
+define ptr @test_simple_load_pre(ptr %ptr) {
 ; CHECK-LABEL: name: test_simple_load_pre
 ; CHECK: [[BASE:%.*]]:_(p0) = COPY $x0
 ; CHECK: [[OFFSET:%.*]]:_(s64) = G_CONSTANT i64 42
@@ -9,47 +9,47 @@ define i8* @test_simple_load_pre(i8* %ptr) {
 ; CHECK: {{%.*}}:_(s8), [[NEXT:%.*]]:_(p0) = G_INDEXED_LOAD [[BASE]], [[OFFSET]](s64), 1
 ; CHECK: $x0 = COPY [[NEXT]](p0)
 
-  %next = getelementptr i8, i8* %ptr, i32 42
-  load volatile i8, i8* %next
-  ret i8* %next
+  %next = getelementptr i8, ptr %ptr, i32 42
+  load volatile i8, ptr %next
+  ret ptr %next
 }
 
-define i8* @test_unused_load_pre(i8* %ptr) {
+define ptr @test_unused_load_pre(ptr %ptr) {
 ; CHECK-LABEL: name: test_unused_load_pre
 ; CHECK-NOT: G_INDEXED_LOAD
 
-  %next = getelementptr i8, i8* %ptr, i32 42
-  load volatile i8, i8* %next
-  ret i8* null
+  %next = getelementptr i8, ptr %ptr, i32 42
+  load volatile i8, ptr %next
+  ret ptr null
 }
 
-define void @test_load_multiple_dominated(i8* %ptr, i1 %tst, i1 %tst2) {
+define void @test_load_multiple_dominated(ptr %ptr, i1 %tst, i1 %tst2) {
 ; CHECK-LABEL: name: test_load_multiple_dominated
 ; CHECK: [[BASE:%.*]]:_(p0) = COPY $x0
 ; CHECK: [[OFFSET:%.*]]:_(s64) = G_CONSTANT i64 42
 ; CHECK-NOT: G_PTR_ADD
 ; CHECK: {{%.*}}:_(s8), [[NEXT:%.*]]:_(p0) = G_INDEXED_LOAD [[BASE]], [[OFFSET]](s64), 1
 ; CHECK: $x0 = COPY [[NEXT]](p0)
-  %next = getelementptr i8, i8* %ptr, i32 42
+  %next = getelementptr i8, ptr %ptr, i32 42
   br i1 %tst, label %do_load, label %end
 
 do_load:
-  load volatile i8, i8* %next
+  load volatile i8, ptr %next
   br i1 %tst2, label %bb1, label %bb2
 
 bb1:
-  store volatile i8* %next, i8** undef
+  store volatile ptr %next, ptr undef
   ret void
 
 bb2:
-  call void @bar(i8* %next)
+  call void @bar(ptr %next)
   ret void
 
 end:
   ret void
 }
 
-define i8* @test_simple_store_pre(i8* %ptr) {
+define ptr @test_simple_store_pre(ptr %ptr) {
 ; CHECK-LABEL: name: test_simple_store_pre
 ; CHECK: [[BASE:%.*]]:_(p0) = COPY $x0
 ; CHECK: [[VAL:%.*]]:_(s8) = G_CONSTANT i8 0
@@ -58,70 +58,69 @@ define i8* @test_simple_store_pre(i8* %ptr) {
 ; CHECK: [[NEXT:%.*]]:_(p0) = G_INDEXED_STORE [[VAL]](s8), [[BASE]], [[OFFSET]](s64), 1
 ; CHECK: $x0 = COPY [[NEXT]](p0)
 
-  %next = getelementptr i8, i8* %ptr, i32 42
-  store volatile i8 0, i8* %next
-  ret i8* %next
+  %next = getelementptr i8, ptr %ptr, i32 42
+  store volatile i8 0, ptr %next
+  ret ptr %next
 }
 
 ; The potentially pre-indexed address is used as the value stored. Converting
 ; would produce the value too late but only by one instruction.
-define i64** @test_store_pre_val_loop(i64** %ptr) {
+define ptr @test_store_pre_val_loop(ptr %ptr) {
 ; CHECK-LABEL: name: test_store_pre_val_loop
 ; CHECK: G_PTR_ADD
 ; CHECK: G_STORE %
 
-  %next = getelementptr i64*, i64** %ptr, i32 42
-  %next.p0 = bitcast i64** %next to i64*
-  store volatile i64* %next.p0, i64** %next
-  ret i64** %next
+  %next = getelementptr ptr, ptr %ptr, i32 42
+  store volatile ptr %next, ptr %next
+  ret ptr %next
 }
 
 ; Potentially pre-indexed address is used between GEP computing it and load.
-define i8* @test_load_pre_before(i8* %ptr) {
+define ptr @test_load_pre_before(ptr %ptr) {
 ; CHECK-LABEL: name: test_load_pre_before
 ; CHECK: G_PTR_ADD
 ; CHECK: BL @bar
 ; CHECK: G_LOAD %
 
-  %next = getelementptr i8, i8* %ptr, i32 42
-  call void @bar(i8* %next)
-  load volatile i8, i8* %next
-  ret i8* %next
+  %next = getelementptr i8, ptr %ptr, i32 42
+  call void @bar(ptr %next)
+  load volatile i8, ptr %next
+  ret ptr %next
 }
 
 ; Materializing the base into a writable register (from sp/fp) would be just as
 ; bad as the original GEP.
-define i8* @test_alloca_load_pre() {
+define ptr @test_alloca_load_pre() {
 ; CHECK-LABEL: name: test_alloca_load_pre
 ; CHECK: G_PTR_ADD
 ; CHECK: G_LOAD %
 
   %ptr = alloca i8, i32 128
-  %next = getelementptr i8, i8* %ptr, i32 42
-  load volatile i8, i8* %next
-  ret i8* %next
+  %next = getelementptr i8, ptr %ptr, i32 42
+  load volatile i8, ptr %next
+  ret ptr %next
 }
 
 ; Load does not dominate use of its address. No indexing.
-define i8* @test_pre_nodom(i8* %in, i1 %tst) {
+define ptr @test_pre_nodom(ptr %in, i1 %tst) {
 ; CHECK-LABEL: name: test_pre_nodom
 ; CHECK: G_PTR_ADD
 ; CHECK: G_LOAD %
 
-  %next = getelementptr i8, i8* %in, i32 16
+  %next = getelementptr i8, ptr %in, i32 16
   br i1 %tst, label %do_indexed, label %use_addr
 
 do_indexed:
-  %val = load i8, i8* %next
-  store i8 %val, i8* @var
-  store i8* %next, i8** @varp8
+  %val = load i8, ptr %next
+  store i8 %val, ptr @var
+  store ptr %next, ptr @varp8
   br label %use_addr
 
 use_addr:
-  ret i8* %next
+  ret ptr %next
 }
 
-define i8* @test_simple_load_post(i8* %ptr) {
+define ptr @test_simple_load_post(ptr %ptr) {
 ; CHECK-LABEL: name: test_simple_load_post
 ; CHECK: [[BASE:%.*]]:_(p0) = COPY $x0
 ; CHECK: [[OFFSET:%.*]]:_(s64) = G_CONSTANT i64 42
@@ -129,12 +128,12 @@ define i8* @test_simple_load_post(i8* %ptr) {
 ; CHECK: {{%.*}}:_(s8), [[NEXT:%.*]]:_(p0) = G_INDEXED_LOAD [[BASE]], [[OFFSET]](s64), 0
 ; CHECK: $x0 = COPY [[NEXT]](p0)
 
-  %next = getelementptr i8, i8* %ptr, i32 42
-  load volatile i8, i8* %ptr
-  ret i8* %next
+  %next = getelementptr i8, ptr %ptr, i32 42
+  load volatile i8, ptr %ptr
+  ret ptr %next
 }
 
-define i8* @test_simple_load_post_gep_after(i8* %ptr) {
+define ptr @test_simple_load_post_gep_after(ptr %ptr) {
 ; CHECK-LABEL: name: test_simple_load_post_gep_after
 ; CHECK: [[BASE:%.*]]:_(p0) = COPY $x0
 ; CHECK: BL @get_offset
@@ -143,50 +142,50 @@ define i8* @test_simple_load_post_gep_after(i8* %ptr) {
 ; CHECK: $x0 = COPY [[ADDR]](p0)
 
   %offset = call i64 @get_offset()
-  load volatile i8, i8* %ptr
-  %next = getelementptr i8, i8* %ptr, i64 %offset
-  ret i8* %next
+  load volatile i8, ptr %ptr
+  %next = getelementptr i8, ptr %ptr, i64 %offset
+  ret ptr %next
 }
 
-define i8* @test_load_post_keep_looking(i8* %ptr) {
+define ptr @test_load_post_keep_looking(ptr %ptr) {
 ; CHECK: name: test_load_post_keep_looking
 ; CHECK: G_INDEXED_LOAD
 
   %offset = call i64 @get_offset()
-  load volatile i8, i8* %ptr
-  %intval = ptrtoint i8* %ptr to i8
-  store i8 %intval, i8* @var
+  load volatile i8, ptr %ptr
+  %intval = ptrtoint ptr %ptr to i8
+  store i8 %intval, ptr @var
 
-  %next = getelementptr i8, i8* %ptr, i64 %offset
-  ret i8* %next
+  %next = getelementptr i8, ptr %ptr, i64 %offset
+  ret ptr %next
 }
 
 ; Base is frame index. Using indexing would need copy anyway.
-define i8* @test_load_post_alloca() {
+define ptr @test_load_post_alloca() {
 ; CHECK-LABEL: name: test_load_post_alloca
 ; CHECK: G_PTR_ADD
 ; CHECK: G_LOAD %
 
   %ptr = alloca i8, i32 128
-  %next = getelementptr i8, i8* %ptr, i32 42
-  load volatile i8, i8* %ptr
-  ret i8* %next
+  %next = getelementptr i8, ptr %ptr, i32 42
+  load volatile i8, ptr %ptr
+  ret ptr %next
 }
 
 ; Offset computation does not dominate the load we might be indexing.
-define i8* @test_load_post_gep_offset_after(i8* %ptr) {
+define ptr @test_load_post_gep_offset_after(ptr %ptr) {
 ; CHECK-LABEL: name: test_load_post_gep_offset_after
 ; CHECK: G_LOAD %
 ; CHECK: BL @get_offset
 ; CHECK: G_PTR_ADD
 
-  load volatile i8, i8* %ptr
+  load volatile i8, ptr %ptr
   %offset = call i64 @get_offset()
-  %next = getelementptr i8, i8* %ptr, i64 %offset
-  ret i8* %next
+  %next = getelementptr i8, ptr %ptr, i64 %offset
+  ret ptr %next
 }
 
-declare void @bar(i8*)
+declare void @bar(ptr)
 declare i64 @get_offset()
 @var = global i8 0
-@varp8 = global i8* null
+@varp8 = global ptr null
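
One case worth calling out in this file: test_store_pre_val_loop stores a pointer through itself. With typed pointers that needed a bitcast to reconcile i64** with i64*; with a single ptr type the round trip disappears and the store is written directly:

  %next = getelementptr ptr, ptr %ptr, i32 42
  store volatile ptr %next, ptr %next  ; the address is also the value being stored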

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/constant-dbg-loc.ll b/llvm/test/CodeGen/AArch64/GlobalISel/constant-dbg-loc.ll
index 8321b98f3f3f..75865695ea20 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/constant-dbg-loc.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/constant-dbg-loc.ll
@@ -30,13 +30,13 @@ define i32 @main() #0 !dbg !14 {
   ; CHECK:   RET_ReallyLR implicit $w0, debug-location !24
 entry:
   %retval = alloca i32, align 4
-  store i32 0, i32* %retval, align 4
-  %0 = load i32, i32* @var1, align 4, !dbg !17
+  store i32 0, ptr %retval, align 4
+  %0 = load i32, ptr @var1, align 4, !dbg !17
   %cmp = icmp eq i32 %0, 1, !dbg !19
   br i1 %cmp, label %if.then, label %if.end, !dbg !20
 
 if.then:
-  store i32 2, i32* @var2, align 4, !dbg !21
+  store i32 2, ptr @var2, align 4, !dbg !21
   br label %if.end, !dbg !23
 
 if.end:

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/darwin-tls-call-clobber.ll b/llvm/test/CodeGen/AArch64/GlobalISel/darwin-tls-call-clobber.ll
index 296795b32761..5e66c7a75c74 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/darwin-tls-call-clobber.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/darwin-tls-call-clobber.ll
@@ -4,25 +4,25 @@ target triple = "arm64-apple-ios13.0.0"
 
 @t_val = thread_local global i32 0, align 4
 @.str = private unnamed_addr constant [5 x i8] c"str1\00", align 1
-@str1 = global i8* getelementptr inbounds ([5 x i8], [5 x i8]* @.str, i32 0, i32 0), align 8
+@str1 = global ptr @.str, align 8
 @.str.1 = private unnamed_addr constant [5 x i8] c"str2\00", align 1
-@str2 = global i8* getelementptr inbounds ([5 x i8], [5 x i8]* @.str.1, i32 0, i32 0), align 8
+@str2 = global ptr @.str.1, align 8
 @.str.2 = private unnamed_addr constant [5 x i8] c"str3\00", align 1
-@str3 = global i8* getelementptr inbounds ([5 x i8], [5 x i8]* @.str.2, i32 0, i32 0), align 8
+@str3 = global ptr @.str.2, align 8
 @.str.3 = private unnamed_addr constant [5 x i8] c"str4\00", align 1
-@str4 = global i8* getelementptr inbounds ([5 x i8], [5 x i8]* @.str.3, i32 0, i32 0), align 8
+@str4 = global ptr @.str.3, align 8
 @.str.4 = private unnamed_addr constant [5 x i8] c"str5\00", align 1
-@str5 = global i8* getelementptr inbounds ([5 x i8], [5 x i8]* @.str.4, i32 0, i32 0), align 8
+@str5 = global ptr @.str.4, align 8
 @.str.5 = private unnamed_addr constant [5 x i8] c"str6\00", align 1
-@str6 = global i8* getelementptr inbounds ([5 x i8], [5 x i8]* @.str.5, i32 0, i32 0), align 8
+@str6 = global ptr @.str.5, align 8
 @.str.6 = private unnamed_addr constant [5 x i8] c"str7\00", align 1
-@str7 = global i8* getelementptr inbounds ([5 x i8], [5 x i8]* @.str.6, i32 0, i32 0), align 8
+@str7 = global ptr @.str.6, align 8
 @.str.7 = private unnamed_addr constant [5 x i8] c"str8\00", align 1
-@str8 = global i8* getelementptr inbounds ([5 x i8], [5 x i8]* @.str.7, i32 0, i32 0), align 8
+@str8 = global ptr @.str.7, align 8
 @.str.8 = private unnamed_addr constant [5 x i8] c"str9\00", align 1
-@str9 = global i8* getelementptr inbounds ([5 x i8], [5 x i8]* @.str.8, i32 0, i32 0), align 8
+@str9 = global ptr @.str.8, align 8
 @.str.9 = private unnamed_addr constant [6 x i8] c"str10\00", align 1
-@str10 = global i8* getelementptr inbounds ([6 x i8], [6 x i8]* @.str.9, i32 0, i32 0), align 8
+@str10 = global ptr @.str.9, align 8
 @.str.10 = private unnamed_addr constant [4 x i8] c"%s\0A\00", align 1
 @.str.11 = private unnamed_addr constant [4 x i8] c"%d\0A\00", align 1
 @.str.12 = private unnamed_addr constant [4 x i8] c"xyz\00", align 1
@@ -35,138 +35,138 @@ target triple = "arm64-apple-ios13.0.0"
 ; CHECK:	ldr	x[[FPTR:[0-9]+]], [x0]
 ; CHECK:        blr     x[[FPTR]]
 
-define void @_Z4funcPKc(i8* %id) {
+define void @_Z4funcPKc(ptr %id) {
 entry:
-  %id.addr = alloca i8*, align 8
-  store i8* %id, i8** %id.addr, align 8
-  %0 = load i8*, i8** %id.addr, align 8
-  %1 = load i8*, i8** @str1, align 8
-  %cmp = icmp eq i8* %0, %1
+  %id.addr = alloca ptr, align 8
+  store ptr %id, ptr %id.addr, align 8
+  %0 = load ptr, ptr %id.addr, align 8
+  %1 = load ptr, ptr @str1, align 8
+  %cmp = icmp eq ptr %0, %1
   br i1 %cmp, label %if.then, label %if.else
 
 if.then:                                          ; preds = %entry
-  %2 = load i8*, i8** @str1, align 8
-  %call = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str.10, i64 0, i64 0), i8* %2)
-  %3 = load i8*, i8** @str2, align 8
-  %call1 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str.10, i64 0, i64 0), i8* %3)
-  %4 = load i8*, i8** @str3, align 8
-  %call2 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str.10, i64 0, i64 0), i8* %4)
-  %5 = load i8*, i8** @str4, align 8
-  %call3 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str.10, i64 0, i64 0), i8* %5)
-  %6 = load i8*, i8** @str5, align 8
-  %call4 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str.10, i64 0, i64 0), i8* %6)
-  %7 = load i8*, i8** @str6, align 8
-  %call5 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str.10, i64 0, i64 0), i8* %7)
-  %8 = load i8*, i8** @str7, align 8
-  %call6 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str.10, i64 0, i64 0), i8* %8)
-  %9 = load i8*, i8** @str8, align 8
-  %call7 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str.10, i64 0, i64 0), i8* %9)
-  %10 = load i8*, i8** @str9, align 8
-  %call8 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str.10, i64 0, i64 0), i8* %10)
-  %11 = load i8*, i8** @str10, align 8
-  %call9 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str.10, i64 0, i64 0), i8* %11)
-  %12 = load i32, i32* @t_val, align 4
-  %call10 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str.11, i64 0, i64 0), i32 %12)
+  %2 = load ptr, ptr @str1, align 8
+  %call = call i32 (ptr, ...) @printf(ptr @.str.10, ptr %2)
+  %3 = load ptr, ptr @str2, align 8
+  %call1 = call i32 (ptr, ...) @printf(ptr @.str.10, ptr %3)
+  %4 = load ptr, ptr @str3, align 8
+  %call2 = call i32 (ptr, ...) @printf(ptr @.str.10, ptr %4)
+  %5 = load ptr, ptr @str4, align 8
+  %call3 = call i32 (ptr, ...) @printf(ptr @.str.10, ptr %5)
+  %6 = load ptr, ptr @str5, align 8
+  %call4 = call i32 (ptr, ...) @printf(ptr @.str.10, ptr %6)
+  %7 = load ptr, ptr @str6, align 8
+  %call5 = call i32 (ptr, ...) @printf(ptr @.str.10, ptr %7)
+  %8 = load ptr, ptr @str7, align 8
+  %call6 = call i32 (ptr, ...) @printf(ptr @.str.10, ptr %8)
+  %9 = load ptr, ptr @str8, align 8
+  %call7 = call i32 (ptr, ...) @printf(ptr @.str.10, ptr %9)
+  %10 = load ptr, ptr @str9, align 8
+  %call8 = call i32 (ptr, ...) @printf(ptr @.str.10, ptr %10)
+  %11 = load ptr, ptr @str10, align 8
+  %call9 = call i32 (ptr, ...) @printf(ptr @.str.10, ptr %11)
+  %12 = load i32, ptr @t_val, align 4
+  %call10 = call i32 (ptr, ...) @printf(ptr @.str.11, i32 %12)
   br label %if.end56
 
 if.else:                                          ; preds = %entry
-  %13 = load i8*, i8** %id.addr, align 8
-  %14 = load i8*, i8** @str2, align 8
-  %cmp11 = icmp eq i8* %13, %14
+  %13 = load ptr, ptr %id.addr, align 8
+  %14 = load ptr, ptr @str2, align 8
+  %cmp11 = icmp eq ptr %13, %14
   br i1 %cmp11, label %if.then12, label %if.else24
 
 if.then12:                                        ; preds = %if.else
-  %15 = load i8*, i8** @str1, align 8
-  %call13 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str.10, i64 0, i64 0), i8* %15)
-  %16 = load i8*, i8** @str2, align 8
-  %call14 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str.10, i64 0, i64 0), i8* %16)
-  %17 = load i8*, i8** @str3, align 8
-  %call15 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str.10, i64 0, i64 0), i8* %17)
-  %18 = load i8*, i8** @str4, align 8
-  %call16 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str.10, i64 0, i64 0), i8* %18)
-  %19 = load i8*, i8** @str5, align 8
-  %call17 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str.10, i64 0, i64 0), i8* %19)
-  %20 = load i8*, i8** @str6, align 8
-  %call18 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str.10, i64 0, i64 0), i8* %20)
-  %21 = load i8*, i8** @str7, align 8
-  %call19 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str.10, i64 0, i64 0), i8* %21)
-  %22 = load i8*, i8** @str8, align 8
-  %call20 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str.10, i64 0, i64 0), i8* %22)
-  %23 = load i8*, i8** @str9, align 8
-  %call21 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str.10, i64 0, i64 0), i8* %23)
-  %24 = load i8*, i8** @str10, align 8
-  %call22 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str.10, i64 0, i64 0), i8* %24)
-  %25 = load i32, i32* @t_val, align 4
-  %call23 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str.11, i64 0, i64 0), i32 %25)
+  %15 = load ptr, ptr @str1, align 8
+  %call13 = call i32 (ptr, ...) @printf(ptr @.str.10, ptr %15)
+  %16 = load ptr, ptr @str2, align 8
+  %call14 = call i32 (ptr, ...) @printf(ptr @.str.10, ptr %16)
+  %17 = load ptr, ptr @str3, align 8
+  %call15 = call i32 (ptr, ...) @printf(ptr @.str.10, ptr %17)
+  %18 = load ptr, ptr @str4, align 8
+  %call16 = call i32 (ptr, ...) @printf(ptr @.str.10, ptr %18)
+  %19 = load ptr, ptr @str5, align 8
+  %call17 = call i32 (ptr, ...) @printf(ptr @.str.10, ptr %19)
+  %20 = load ptr, ptr @str6, align 8
+  %call18 = call i32 (ptr, ...) @printf(ptr @.str.10, ptr %20)
+  %21 = load ptr, ptr @str7, align 8
+  %call19 = call i32 (ptr, ...) @printf(ptr @.str.10, ptr %21)
+  %22 = load ptr, ptr @str8, align 8
+  %call20 = call i32 (ptr, ...) @printf(ptr @.str.10, ptr %22)
+  %23 = load ptr, ptr @str9, align 8
+  %call21 = call i32 (ptr, ...) @printf(ptr @.str.10, ptr %23)
+  %24 = load ptr, ptr @str10, align 8
+  %call22 = call i32 (ptr, ...) @printf(ptr @.str.10, ptr %24)
+  %25 = load i32, ptr @t_val, align 4
+  %call23 = call i32 (ptr, ...) @printf(ptr @.str.11, i32 %25)
   br label %if.end55
 
 if.else24:                                        ; preds = %if.else
-  %26 = load i8*, i8** %id.addr, align 8
-  %27 = load i8*, i8** @str3, align 8
-  %cmp25 = icmp eq i8* %26, %27
+  %26 = load ptr, ptr %id.addr, align 8
+  %27 = load ptr, ptr @str3, align 8
+  %cmp25 = icmp eq ptr %26, %27
   br i1 %cmp25, label %if.then26, label %if.else27
 
 if.then26:                                        ; preds = %if.else24
   br label %if.end54
 
 if.else27:                                        ; preds = %if.else24
-  %28 = load i8*, i8** %id.addr, align 8
-  %29 = load i8*, i8** @str4, align 8
-  %cmp28 = icmp eq i8* %28, %29
+  %28 = load ptr, ptr %id.addr, align 8
+  %29 = load ptr, ptr @str4, align 8
+  %cmp28 = icmp eq ptr %28, %29
   br i1 %cmp28, label %if.then29, label %if.else30
 
 if.then29:                                        ; preds = %if.else27
   br label %if.end53
 
 if.else30:                                        ; preds = %if.else27
-  %30 = load i8*, i8** %id.addr, align 8
-  %31 = load i8*, i8** @str5, align 8
-  %cmp31 = icmp eq i8* %30, %31
+  %30 = load ptr, ptr %id.addr, align 8
+  %31 = load ptr, ptr @str5, align 8
+  %cmp31 = icmp eq ptr %30, %31
   br i1 %cmp31, label %if.then32, label %if.else33
 
 if.then32:                                        ; preds = %if.else30
   br label %if.end52
 
 if.else33:                                        ; preds = %if.else30
-  %32 = load i8*, i8** %id.addr, align 8
-  %33 = load i8*, i8** @str6, align 8
-  %cmp34 = icmp eq i8* %32, %33
+  %32 = load ptr, ptr %id.addr, align 8
+  %33 = load ptr, ptr @str6, align 8
+  %cmp34 = icmp eq ptr %32, %33
   br i1 %cmp34, label %if.then35, label %if.else36
 
 if.then35:                                        ; preds = %if.else33
   br label %if.end51
 
 if.else36:                                        ; preds = %if.else33
-  %34 = load i8*, i8** %id.addr, align 8
-  %35 = load i8*, i8** @str7, align 8
-  %cmp37 = icmp eq i8* %34, %35
+  %34 = load ptr, ptr %id.addr, align 8
+  %35 = load ptr, ptr @str7, align 8
+  %cmp37 = icmp eq ptr %34, %35
   br i1 %cmp37, label %if.then38, label %if.else39
 
 if.then38:                                        ; preds = %if.else36
   br label %if.end50
 
 if.else39:                                        ; preds = %if.else36
-  %36 = load i8*, i8** %id.addr, align 8
-  %37 = load i8*, i8** @str8, align 8
-  %cmp40 = icmp eq i8* %36, %37
+  %36 = load ptr, ptr %id.addr, align 8
+  %37 = load ptr, ptr @str8, align 8
+  %cmp40 = icmp eq ptr %36, %37
   br i1 %cmp40, label %if.then41, label %if.else42
 
 if.then41:                                        ; preds = %if.else39
   br label %if.end49
 
 if.else42:                                        ; preds = %if.else39
-  %38 = load i8*, i8** %id.addr, align 8
-  %39 = load i8*, i8** @str9, align 8
-  %cmp43 = icmp eq i8* %38, %39
+  %38 = load ptr, ptr %id.addr, align 8
+  %39 = load ptr, ptr @str9, align 8
+  %cmp43 = icmp eq ptr %38, %39
   br i1 %cmp43, label %if.then44, label %if.else45
 
 if.then44:                                        ; preds = %if.else42
   br label %if.end48
 
 if.else45:                                        ; preds = %if.else42
-  %40 = load i8*, i8** %id.addr, align 8
-  %41 = load i8*, i8** @str10, align 8
-  %cmp46 = icmp eq i8* %40, %41
+  %40 = load ptr, ptr %id.addr, align 8
+  %41 = load ptr, ptr @str10, align 8
+  %cmp46 = icmp eq ptr %40, %41
   br i1 %cmp46, label %if.then47, label %if.end
 
 if.then47:                                        ; preds = %if.else45
@@ -202,5 +202,5 @@ if.end55:                                         ; preds = %if.end54, %if.then1
 if.end56:                                         ; preds = %if.end55, %if.then
   ret void
 }
-declare i32 @printf(i8*, ...)
+declare i32 @printf(ptr, ...)
 

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/debug-cpp.ll b/llvm/test/CodeGen/AArch64/GlobalISel/debug-cpp.ll
index caf0a2eebca5..cb4a01cbcf09 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/debug-cpp.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/debug-cpp.ll
@@ -20,11 +20,10 @@ target triple = "aarch64-unknown-linux-gnu"
 ; CHECK-LABEL: name: _Z3foo6NTCopy
 ; CHECK: DBG_VALUE %{{[0-9]+}}(p0), 0, !23, !DIExpression(), debug-location !24
 ; Function Attrs: noinline nounwind optnone
-define dso_local i32 @_Z3foo6NTCopy(%struct.NTCopy* %o) #0 !dbg !7 {
+define dso_local i32 @_Z3foo6NTCopy(ptr %o) #0 !dbg !7 {
 entry:
-  call void @llvm.dbg.declare(metadata %struct.NTCopy* %o, metadata !23, metadata !DIExpression()), !dbg !24
-  %x = getelementptr inbounds %struct.NTCopy, %struct.NTCopy* %o, i32 0, i32 0, !dbg !25
-  %0 = load i32, i32* %x, align 4, !dbg !25
+  call void @llvm.dbg.declare(metadata ptr %o, metadata !23, metadata !DIExpression()), !dbg !24
+  %0 = load i32, ptr %o, align 4, !dbg !25
   ret i32 %0, !dbg !26
 }
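
The getelementptr dropped here is the usual zero-offset case: indices 0,0 into the first field of %struct.NTCopy produce the same address as the base pointer, so under opaque pointers the GEP folds away and the load reads from %o directly:

  ; typed pointers
  %x = getelementptr inbounds %struct.NTCopy, %struct.NTCopy* %o, i32 0, i32 0
  %0 = load i32, i32* %x, align 4
  ; opaque pointers: the offset-0 GEP is a no-op
  %0 = load i32, ptr %o, align 4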
 

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/debug-insts.ll b/llvm/test/CodeGen/AArch64/GlobalISel/debug-insts.ll
index a024ad8d2057..fbfa119ed2e3 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/debug-insts.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/debug-insts.ll
@@ -11,8 +11,8 @@
 define void @debug_declare(i32 %in) #0 !dbg !7 {
 entry:
   %in.addr = alloca i32, align 4
-  store i32 %in, i32* %in.addr, align 4
-  call void @llvm.dbg.declare(metadata i32* %in.addr, metadata !11, metadata !DIExpression()), !dbg !12
+  store i32 %in, ptr %in.addr, align 4
+  call void @llvm.dbg.declare(metadata ptr %in.addr, metadata !11, metadata !DIExpression()), !dbg !12
   ret void, !dbg !12
 }
 
@@ -21,7 +21,7 @@ entry:
 define void @debug_declare_vla(i32 %in) #0 !dbg !13 {
 entry:
   %vla.addr = alloca i32, i32 %in
-  call void @llvm.dbg.declare(metadata i32* %vla.addr, metadata !14, metadata !DIExpression()), !dbg !15
+  call void @llvm.dbg.declare(metadata ptr %vla.addr, metadata !14, metadata !DIExpression()), !dbg !15
   ret void, !dbg !15
 }
 
@@ -33,19 +33,19 @@ define void @debug_value(i32 %in) #0 !dbg !16 {
   %addr = alloca i32
 ; CHECK: DBG_VALUE [[IN]](s32), $noreg, !17, !DIExpression(), debug-location !18
   call void @llvm.dbg.value(metadata i32 %in, i64 0, metadata !17, metadata !DIExpression()), !dbg !18
-  store i32 %in, i32* %addr
+  store i32 %in, ptr %addr
 ; CHECK: DBG_VALUE %1(p0), $noreg, !17, !DIExpression(DW_OP_deref), debug-location !18
-  call void @llvm.dbg.value(metadata i32* %addr, i64 0, metadata !17, metadata !DIExpression(DW_OP_deref)), !dbg !18
+  call void @llvm.dbg.value(metadata ptr %addr, i64 0, metadata !17, metadata !DIExpression(DW_OP_deref)), !dbg !18
 ; CHECK: DBG_VALUE 123, 0, !17, !DIExpression(), debug-location !18
   call void @llvm.dbg.value(metadata i32 123, i64 0, metadata !17, metadata !DIExpression()), !dbg !18
 ; CHECK: DBG_VALUE float 1.000000e+00, 0, !17, !DIExpression(), debug-location !18
   call void @llvm.dbg.value(metadata float 1.000000e+00, i64 0, metadata !17, metadata !DIExpression()), !dbg !18
 ; CHECK: DBG_VALUE 0, 0, !17, !DIExpression(), debug-location !18
-  call void @llvm.dbg.value(metadata i32* null, i64 0, metadata !17, metadata !DIExpression()), !dbg !18
+  call void @llvm.dbg.value(metadata ptr null, i64 0, metadata !17, metadata !DIExpression()), !dbg !18
 ; CHECK: DBG_VALUE $noreg, 0, !17, !DIExpression(), debug-location !18
-  call void @llvm.dbg.value(metadata i32* @gv, i64 0, metadata !17, metadata !DIExpression()), !dbg !18
+  call void @llvm.dbg.value(metadata ptr @gv, i64 0, metadata !17, metadata !DIExpression()), !dbg !18
 ; CHECK: DBG_VALUE 42, 0, !17, !DIExpression(), debug-location !18
-  call void @llvm.dbg.value(metadata i32* inttoptr (i64 42 to i32*), i64 0, metadata !17, metadata !DIExpression()), !dbg !18
+  call void @llvm.dbg.value(metadata ptr inttoptr (i64 42 to ptr), i64 0, metadata !17, metadata !DIExpression()), !dbg !18
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/dynamic-alloca-lifetime.ll b/llvm/test/CodeGen/AArch64/GlobalISel/dynamic-alloca-lifetime.ll
index 3ce873f2f6e8..683cdfd9eeac 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/dynamic-alloca-lifetime.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/dynamic-alloca-lifetime.ll
@@ -13,10 +13,10 @@
 ; CHECK-NOT: remark{{.*}}foo
 
 ; Function Attrs: nounwind
-declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #0
+declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #0
 
 ; Function Attrs: nounwind
-declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #0
+declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #0
 
 ; Function Attrs: ssp
 define void @foo(i1 %cond1, i1 %cond2) #1 {
@@ -32,12 +32,10 @@ end1:
   ret void
 
 if.else130:                                       ; preds = %bb1
-  %tmp = getelementptr inbounds [8192 x i8], [8192 x i8]* %bitmapBuffer, i32 0, i32 0
-  call void @llvm.lifetime.start.p0i8(i64 8192, i8* %tmp) #0
-  call void @llvm.lifetime.end.p0i8(i64 8192, i8* %tmp) #0
-  %tmp25 = getelementptr inbounds [8192 x i8], [8192 x i8]* %bitmapBuffer229, i32 0, i32 0
-  call void @llvm.lifetime.start.p0i8(i64 8192, i8* %tmp25) #0
-  call void @llvm.lifetime.end.p0i8(i64 8192, i8* %tmp25) #0
+  call void @llvm.lifetime.start.p0(i64 8192, ptr %bitmapBuffer) #0
+  call void @llvm.lifetime.end.p0(i64 8192, ptr %bitmapBuffer) #0
+  call void @llvm.lifetime.start.p0(i64 8192, ptr %bitmapBuffer229) #0
+  call void @llvm.lifetime.end.p0(i64 8192, ptr %bitmapBuffer229) #0
   br label %end1
 }
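
Two independent simplifications meet in this file. The lifetime intrinsics are overloaded on their pointer argument, and with opaque pointers the mangling suffix encodes only the address space, so .p0i8 becomes .p0. Separately, the array-decay GEPs (indices 0,0 into the [8192 x i8] allocas) are zero-offset and fold away, leaving calls of the shape:

  declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
  ; the alloca itself is the address; no decay GEP required
  call void @llvm.lifetime.start.p0(i64 8192, ptr %bitmapBuffer)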
 

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/dynamic-alloca.ll b/llvm/test/CodeGen/AArch64/GlobalISel/dynamic-alloca.ll
index ad2a5fd8c1f8..88eaa1382d1d 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/dynamic-alloca.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/dynamic-alloca.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
 ; RUN: llc -mtriple=aarch64 -global-isel %s -o - -stop-after=irtranslator | FileCheck %s
 
-define i8* @test_simple_alloca(i32 %numelts) {
+define ptr @test_simple_alloca(i32 %numelts) {
   ; CHECK-LABEL: name: test_simple_alloca
   ; CHECK: bb.1 (%ir-block.0):
   ; CHECK:   liveins: $w0
@@ -17,10 +17,10 @@ define i8* @test_simple_alloca(i32 %numelts) {
   ; CHECK:   $x0 = COPY [[DYN_STACKALLOC]](p0)
   ; CHECK:   RET_ReallyLR implicit $x0
   %addr = alloca i8, i32 %numelts
-  ret i8* %addr
+  ret ptr %addr
 }
 
-define i8* @test_aligned_alloca(i32 %numelts) {
+define ptr @test_aligned_alloca(i32 %numelts) {
   ; CHECK-LABEL: name: test_aligned_alloca
   ; CHECK: bb.1 (%ir-block.0):
   ; CHECK:   liveins: $w0
@@ -36,10 +36,10 @@ define i8* @test_aligned_alloca(i32 %numelts) {
   ; CHECK:   $x0 = COPY [[DYN_STACKALLOC]](p0)
   ; CHECK:   RET_ReallyLR implicit $x0
   %addr = alloca i8, i32 %numelts, align 32
-  ret i8* %addr
+  ret ptr %addr
 }
 
-define i128* @test_natural_alloca(i32 %numelts) {
+define ptr @test_natural_alloca(i32 %numelts) {
   ; CHECK-LABEL: name: test_natural_alloca
   ; CHECK: bb.1 (%ir-block.0):
   ; CHECK:   liveins: $w0
@@ -55,5 +55,5 @@ define i128* @test_natural_alloca(i32 %numelts) {
   ; CHECK:   $x0 = COPY [[DYN_STACKALLOC]](p0)
   ; CHECK:   RET_ReallyLR implicit $x0
   %addr = alloca i128, i32 %numelts
-  ret i128* %addr
+  ret ptr %addr
 }

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/freeze.ll b/llvm/test/CodeGen/AArch64/GlobalISel/freeze.ll
index 67af9e7574ba..a793ecbf03f6 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/freeze.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/freeze.ll
@@ -64,7 +64,7 @@ define <2 x i32> @freeze_ivec() {
   ret <2 x i32> %t1
 }
 
-define i8* @freeze_ptr() {
+define ptr @freeze_ptr() {
 ; CHECK-LABEL: freeze_ptr:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    add x0, x8, #4
@@ -74,9 +74,9 @@ define i8* @freeze_ptr() {
 ; GISEL:       // %bb.0:
 ; GISEL-NEXT:    add x0, x8, #4
 ; GISEL-NEXT:    ret
-  %y1 = freeze i8* undef
-  %t1 = getelementptr i8, i8* %y1, i64 4
-  ret i8* %t1
+  %y1 = freeze ptr undef
+  %t1 = getelementptr i8, ptr %y1, i64 4
+  ret ptr %t1
 }
 
 define i32 @freeze_struct() {

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/gisel-fail-intermediate-legalizer.ll b/llvm/test/CodeGen/AArch64/GlobalISel/gisel-fail-intermediate-legalizer.ll
index 03794caf808b..e5ca0d41fc54 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/gisel-fail-intermediate-legalizer.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/gisel-fail-intermediate-legalizer.ll
@@ -1,8 +1,8 @@
 ;RUN: llc -mtriple=aarch64-unknown-unknown -o - -global-isel -global-isel-abort=2 %s 2>&1 | FileCheck %s
 ; CHECK: fallback
 ; CHECK-LABEL: foo
-define i16 @foo(fp128* %p) {
-  %tmp0 = load fp128, fp128* %p
+define i16 @foo(ptr %p) {
+  %tmp0 = load fp128, ptr %p
   %tmp1 = fptoui fp128 %tmp0 to i16
   ret i16 %tmp1
 }

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/inttoptr_add.ll b/llvm/test/CodeGen/AArch64/GlobalISel/inttoptr_add.ll
index b7349b148424..d16a1a0f3346 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/inttoptr_add.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/inttoptr_add.ll
@@ -10,6 +10,6 @@ define dso_local void @fn() {
 ; CHECK-NEXT:    str w9, [x8]
 ; CHECK-NEXT:    ret
 entry:
-  store i32 1, i32* bitcast (i8* getelementptr inbounds (i8, i8* inttoptr (i32 -3076096 to i8*), i64 36) to i32*), align 4
+  store i32 1, ptr getelementptr inbounds (i8, ptr inttoptr (i32 -3076096 to ptr), i64 36), align 4
   ret void
 }

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-atomic-metadata.ll b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-atomic-metadata.ll
index 826291973648..c471ba0cabaa 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-atomic-metadata.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-atomic-metadata.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
 ; RUN: llc -mtriple=aarch64-- -mcpu=falkor -mattr=+lse -O0 -aarch64-enable-atomic-cfg-tidy=0 -stop-after=irtranslator -global-isel -verify-machineinstrs %s -o - | FileCheck %s
 
-define i32 @atomicrmw_volatile(i32* %ptr) {
+define i32 @atomicrmw_volatile(ptr %ptr) {
   ; CHECK-LABEL: name: atomicrmw_volatile
   ; CHECK: bb.1 (%ir-block.0):
   ; CHECK:   liveins: $x0
@@ -10,11 +10,11 @@ define i32 @atomicrmw_volatile(i32* %ptr) {
   ; CHECK:   [[ATOMICRMW_ADD:%[0-9]+]]:_(s32) = G_ATOMICRMW_ADD [[COPY]](p0), [[C]] :: (volatile load store monotonic (s32) on %ir.ptr)
   ; CHECK:   $w0 = COPY [[ATOMICRMW_ADD]](s32)
   ; CHECK:   RET_ReallyLR implicit $w0
-  %oldval = atomicrmw volatile add i32* %ptr, i32 1 monotonic
+  %oldval = atomicrmw volatile add ptr %ptr, i32 1 monotonic
   ret i32 %oldval
 }
 
-define i32 @atomicrmw_falkor(i32* %ptr) {
+define i32 @atomicrmw_falkor(ptr %ptr) {
   ; CHECK-LABEL: name: atomicrmw_falkor
   ; CHECK: bb.1 (%ir-block.0):
   ; CHECK:   liveins: $x0
@@ -23,11 +23,11 @@ define i32 @atomicrmw_falkor(i32* %ptr) {
   ; CHECK:   [[ATOMICRMW_ADD:%[0-9]+]]:_(s32) = G_ATOMICRMW_ADD [[COPY]](p0), [[C]] :: ("aarch64-strided-access" load store monotonic (s32) on %ir.ptr)
   ; CHECK:   $w0 = COPY [[ATOMICRMW_ADD]](s32)
   ; CHECK:   RET_ReallyLR implicit $w0
-  %oldval = atomicrmw add i32* %ptr, i32 1 monotonic, !falkor.strided.access !0
+  %oldval = atomicrmw add ptr %ptr, i32 1 monotonic, !falkor.strided.access !0
   ret i32 %oldval
 }
 
-define i32 @atomicrmw_volatile_falkor(i32* %ptr) {
+define i32 @atomicrmw_volatile_falkor(ptr %ptr) {
   ; CHECK-LABEL: name: atomicrmw_volatile_falkor
   ; CHECK: bb.1 (%ir-block.0):
   ; CHECK:   liveins: $x0
@@ -36,11 +36,11 @@ define i32 @atomicrmw_volatile_falkor(i32* %ptr) {
   ; CHECK:   [[ATOMICRMW_ADD:%[0-9]+]]:_(s32) = G_ATOMICRMW_ADD [[COPY]](p0), [[C]] :: (volatile "aarch64-strided-access" load store monotonic (s32) on %ir.ptr)
   ; CHECK:   $w0 = COPY [[ATOMICRMW_ADD]](s32)
   ; CHECK:   RET_ReallyLR implicit $w0
-  %oldval = atomicrmw volatile add i32* %ptr, i32 1 monotonic, !falkor.strided.access !0
+  %oldval = atomicrmw volatile add ptr %ptr, i32 1 monotonic, !falkor.strided.access !0
   ret i32 %oldval
 }
 
-define i32 @cmpxchg_volatile(i32* %addr) {
+define i32 @cmpxchg_volatile(ptr %addr) {
   ; CHECK-LABEL: name: cmpxchg_volatile
   ; CHECK: bb.1 (%ir-block.0):
   ; CHECK:   liveins: $x0
@@ -50,12 +50,12 @@ define i32 @cmpxchg_volatile(i32* %addr) {
   ; CHECK:   [[ATOMIC_CMPXCHG_WITH_SUCCESS:%[0-9]+]]:_(s32), [[ATOMIC_CMPXCHG_WITH_SUCCESS1:%[0-9]+]]:_(s1) = G_ATOMIC_CMPXCHG_WITH_SUCCESS [[COPY]](p0), [[C]], [[C1]] :: (volatile load store monotonic monotonic (s32) on %ir.addr)
   ; CHECK:   $w0 = COPY [[ATOMIC_CMPXCHG_WITH_SUCCESS]](s32)
   ; CHECK:   RET_ReallyLR implicit $w0
-  %val_success = cmpxchg volatile i32* %addr, i32 0, i32 1 monotonic monotonic
+  %val_success = cmpxchg volatile ptr %addr, i32 0, i32 1 monotonic monotonic
   %value_loaded = extractvalue { i32, i1 } %val_success, 0
   ret i32 %value_loaded
 }
 
-define i32 @cmpxchg_falkor(i32* %addr) {
+define i32 @cmpxchg_falkor(ptr %addr) {
   ; CHECK-LABEL: name: cmpxchg_falkor
   ; CHECK: bb.1 (%ir-block.0):
   ; CHECK:   liveins: $x0
@@ -65,12 +65,12 @@ define i32 @cmpxchg_falkor(i32* %addr) {
   ; CHECK:   [[ATOMIC_CMPXCHG_WITH_SUCCESS:%[0-9]+]]:_(s32), [[ATOMIC_CMPXCHG_WITH_SUCCESS1:%[0-9]+]]:_(s1) = G_ATOMIC_CMPXCHG_WITH_SUCCESS [[COPY]](p0), [[C]], [[C1]] :: ("aarch64-strided-access" load store monotonic monotonic (s32) on %ir.addr)
   ; CHECK:   $w0 = COPY [[ATOMIC_CMPXCHG_WITH_SUCCESS]](s32)
   ; CHECK:   RET_ReallyLR implicit $w0
-  %val_success = cmpxchg i32* %addr, i32 0, i32 1 monotonic monotonic, !falkor.strided.access !0
+  %val_success = cmpxchg ptr %addr, i32 0, i32 1 monotonic monotonic, !falkor.strided.access !0
   %value_loaded = extractvalue { i32, i1 } %val_success, 0
   ret i32 %value_loaded
 }
 
-define i32 @cmpxchg_volatile_falkor(i32* %addr) {
+define i32 @cmpxchg_volatile_falkor(ptr %addr) {
   ; CHECK-LABEL: name: cmpxchg_volatile_falkor
   ; CHECK: bb.1 (%ir-block.0):
   ; CHECK:   liveins: $x0
@@ -80,7 +80,7 @@ define i32 @cmpxchg_volatile_falkor(i32* %addr) {
   ; CHECK:   [[ATOMIC_CMPXCHG_WITH_SUCCESS:%[0-9]+]]:_(s32), [[ATOMIC_CMPXCHG_WITH_SUCCESS1:%[0-9]+]]:_(s1) = G_ATOMIC_CMPXCHG_WITH_SUCCESS [[COPY]](p0), [[C]], [[C1]] :: (volatile "aarch64-strided-access" load store monotonic monotonic (s32) on %ir.addr)
   ; CHECK:   $w0 = COPY [[ATOMIC_CMPXCHG_WITH_SUCCESS]](s32)
   ; CHECK:   RET_ReallyLR implicit $w0
-  %val_success = cmpxchg volatile i32* %addr, i32 0, i32 1 monotonic monotonic, !falkor.strided.access !0
+  %val_success = cmpxchg volatile ptr %addr, i32 0, i32 1 monotonic monotonic, !falkor.strided.access !0
   %value_loaded = extractvalue { i32, i1 } %val_success, 0
   ret i32 %value_loaded
 }

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-block-order.ll b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-block-order.ll
index 89e2fecbe0f5..f9826b6ad5f2 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-block-order.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-block-order.ll
@@ -9,7 +9,7 @@ start:
   br label %bb2
 
 bb1:
-  store i8 %0, i8* undef, align 4
+  store i8 %0, ptr undef, align 4
   ret void
 
 bb2:

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-delayed-stack-protector.ll b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-delayed-stack-protector.ll
index 683851d196ef..4a3b85d9cec1 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-delayed-stack-protector.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-delayed-stack-protector.ll
@@ -32,9 +32,8 @@ define void @caller() sspreq {
   ; CHECK-NEXT:   RET_ReallyLR
 entry:
   %x = alloca i32, align 4
-  %0 = bitcast i32* %x to i8*
-  call void @callee(i32* nonnull %x)
+  call void @callee(ptr nonnull %x)
   ret void
 }
 
-declare void @callee(i32*)
+declare void @callee(ptr)

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-dilocation.ll b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-dilocation.ll
index 63c5eecd8b9a..a8fc761a3a53 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-dilocation.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-dilocation.ll
@@ -5,10 +5,10 @@
 
 ; CHECK: Checking DILocation from   %retval = alloca i32, align 4 was copied to G_FRAME_INDEX
 ; CHECK: Checking DILocation from   %rv = alloca i32, align 4 was copied to G_FRAME_INDEX
-; CHECK: Checking DILocation from   store i32 0, i32* %retval, align 4 was copied to G_CONSTANT
-; CHECK: Checking DILocation from   store i32 0, i32* %retval, align 4 was copied to G_STORE
-; CHECK: Checking DILocation from   store i32 0, i32* %rv, align 4, !dbg !12 was copied to G_STORE debug-location !12; t.cpp:2:5
-; CHECK: Checking DILocation from   %0 = load i32, i32* %rv, align 4, !dbg !13 was copied to G_LOAD debug-location !13; t.cpp:3:8
+; CHECK: Checking DILocation from   store i32 0, ptr %retval, align 4 was copied to G_CONSTANT
+; CHECK: Checking DILocation from   store i32 0, ptr %retval, align 4 was copied to G_STORE
+; CHECK: Checking DILocation from   store i32 0, ptr %rv, align 4, !dbg !12 was copied to G_STORE debug-location !12; t.cpp:2:5
+; CHECK: Checking DILocation from   %0 = load i32, ptr %rv, align 4, !dbg !13 was copied to G_LOAD debug-location !13; t.cpp:3:8
 ; CHECK: Checking DILocation from   ret i32 %0, !dbg !14 was copied to COPY debug-location !14; t.cpp:3:1
 ; CHECK: Checking DILocation from   ret i32 %0, !dbg !14 was copied to RET_ReallyLR implicit $w0, debug-location !14; t.cpp:3:1
 
@@ -21,10 +21,10 @@ define dso_local i32 @main() !dbg !7 {
 entry:
   %retval = alloca i32, align 4
   %rv = alloca i32, align 4
-  store i32 0, i32* %retval, align 4
-  call void @llvm.dbg.declare(metadata i32* %rv, metadata !11, metadata !DIExpression()), !dbg !12
-  store i32 0, i32* %rv, align 4, !dbg !12
-  %0 = load i32, i32* %rv, align 4, !dbg !13
+  store i32 0, ptr %retval, align 4
+  call void @llvm.dbg.declare(metadata ptr %rv, metadata !11, metadata !DIExpression()), !dbg !12
+  store i32 0, ptr %rv, align 4, !dbg !12
+  %0 = load i32, ptr %rv, align 4, !dbg !13
   ret i32 %0, !dbg !14
 }
 

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-exceptions.ll b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-exceptions.ll
index ee9a938bd866..62be253ec08d 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-exceptions.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-exceptions.ll
@@ -1,13 +1,13 @@
 ; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
 ; RUN: llc -O0 -mtriple=aarch64-apple-ios -global-isel -stop-after=irtranslator %s -o - | FileCheck %s
 
-@_ZTIi = external global i8*
+@_ZTIi = external global ptr
 
 declare i32 @foo(i32)
 declare i32 @__gxx_personality_v0(...)
-declare i32 @llvm.eh.typeid.for(i8*)
+declare i32 @llvm.eh.typeid.for(ptr)
 
-define { i8*, i32 } @bar() personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+define { ptr, i32 } @bar() personality ptr @__gxx_personality_v0 {
   ; CHECK-LABEL: name: bar
   ; CHECK: bb.1 (%ir-block.0):
   ; CHECK-NEXT:   successors: %bb.3(0x40000000), %bb.2(0x40000000)
@@ -45,16 +45,16 @@ define { i8*, i32 } @bar() personality i8* bitcast (i32 (...)* @__gxx_personalit
 
 
 broken:
-  %ptr.sel = landingpad { i8*, i32 } catch i8* bitcast(i8** @_ZTIi to i8*)
-  ret { i8*, i32 } %ptr.sel
+  %ptr.sel = landingpad { ptr, i32 } catch ptr @_ZTIi
+  ret { ptr, i32 } %ptr.sel
 
 continue:
-  %sel.int = tail call i32 @llvm.eh.typeid.for(i8* bitcast(i8** @_ZTIi to i8*))
-  %res.good = insertvalue { i8*, i32 } undef, i32 %sel.int, 1
-  ret { i8*, i32 } %res.good
+  %sel.int = tail call i32 @llvm.eh.typeid.for(ptr @_ZTIi)
+  %res.good = insertvalue { ptr, i32 } undef, i32 %sel.int, 1
+  ret { ptr, i32 } %res.good
 }
 
-define void @test_invoke_indirect(void()* %callee) personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+define void @test_invoke_indirect(ptr %callee) personality ptr @__gxx_personality_v0 {
   ; CHECK-LABEL: name: test_invoke_indirect
   ; CHECK: bb.1 (%ir-block.0):
   ; CHECK-NEXT:   successors: %bb.3(0x40000000), %bb.2(0x40000000)
@@ -83,15 +83,15 @@ define void @test_invoke_indirect(void()* %callee) personality i8* bitcast (i32
   invoke void %callee() to label %continue unwind label %broken
 
 broken:
-  landingpad { i8*, i32 } catch i8* bitcast(i8** @_ZTIi to i8*)
+  landingpad { ptr, i32 } catch ptr @_ZTIi
   ret void
 
 continue:
   ret void
 }
 
-declare void @printf(i8*, ...)
-define void @test_invoke_varargs() personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+declare void @printf(ptr, ...)
+define void @test_invoke_varargs() personality ptr @__gxx_personality_v0 {
   ; CHECK-LABEL: name: test_invoke_varargs
   ; CHECK: bb.1 (%ir-block.0):
   ; CHECK-NEXT:   successors: %bb.3(0x40000000), %bb.2(0x40000000)
@@ -127,10 +127,10 @@ define void @test_invoke_varargs() personality i8* bitcast (i32 (...)* @__gxx_pe
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.3.continue:
   ; CHECK-NEXT:   RET_ReallyLR
-  invoke void(i8*, ...) @printf(i8* null, i32 42, float 1.0) to label %continue unwind label %broken
+  invoke void(ptr, ...) @printf(ptr null, i32 42, float 1.0) to label %continue unwind label %broken
 
 broken:
-  landingpad { i8*, i32 } catch i8* bitcast(i8** @_ZTIi to i8*)
+  landingpad { ptr, i32 } catch ptr @_ZTIi
   ret void
 
 continue:
@@ -140,7 +140,7 @@ continue:
 @global_var = external global i32
 
 declare void @may_throw()
-define i32 @test_lpad_phi() personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+define i32 @test_lpad_phi() personality ptr @__gxx_personality_v0 {
   ; CHECK-LABEL: name: test_lpad_phi
   ; CHECK: bb.1 (%ir-block.0):
   ; CHECK-NEXT:   successors: %bb.3(0x40000000), %bb.2(0x40000000)
@@ -175,15 +175,15 @@ define i32 @test_lpad_phi() personality i8* bitcast (i32 (...)* @__gxx_personali
   ; CHECK-NEXT:   [[PHI1:%[0-9]+]]:_(s32) = G_PHI [[C2]](s32), %bb.1, [[C3]](s32), %bb.2
   ; CHECK-NEXT:   $w0 = COPY [[PHI1]](s32)
   ; CHECK-NEXT:   RET_ReallyLR implicit $w0
-  store i32 42, i32* @global_var
+  store i32 42, ptr @global_var
   invoke void @may_throw()
           to label %continue unwind label %lpad
 
 lpad:                                             ; preds = %entry
   %p = phi i32 [ 11, %0 ]  ; Trivial, but -O0 keeps it
-  %1 = landingpad { i8*, i32 }
-          catch i8* null
-  store i32 %p, i32* @global_var
+  %1 = landingpad { ptr, i32 }
+          catch ptr null
+  store i32 %p, ptr @global_var
   br label %continue
 
 continue:                                         ; preds = %entry, %lpad

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-extract-used-by-dbg.ll b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-extract-used-by-dbg.ll
index dae85e6404b2..9f398b4a9d3b 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-extract-used-by-dbg.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-extract-used-by-dbg.ll
@@ -11,11 +11,11 @@ define hidden void @foo() unnamed_addr #1 !dbg !230 {
   br i1 undef, label %bb4, label %bb5
 
 bb4:                                              ; preds = %bb3
-  %i = extractvalue { i8*, i64 } undef, 0
+  %i = extractvalue { ptr, i64 } undef, 0
   ret void
 
 bb5:                                              ; preds = %bb3
-  call void @llvm.dbg.value(metadata i8* %i, metadata !370, metadata !DIExpression(DW_OP_LLVM_fragment, 0, 64)), !dbg !372
+  call void @llvm.dbg.value(metadata ptr %i, metadata !370, metadata !DIExpression(DW_OP_LLVM_fragment, 0, 64)), !dbg !372
   ret void
 }
 

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-indirect-br-repeated-block.ll b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-indirect-br-repeated-block.ll
index bef15631a231..70872da37612 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-indirect-br-repeated-block.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-indirect-br-repeated-block.ll
@@ -16,7 +16,7 @@ define void @foo() {
   ; CHECK:   successors:
   ; CHECK: bb.4 (%ir-block.3):
   ; CHECK:   RET_ReallyLR
-  indirectbr i8* undef, [label %1, label %3, label %2, label %3, label %3]
+  indirectbr ptr undef, [label %1, label %3, label %2, label %3, label %3]
 1:
   unreachable
 2:

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-inline-asm.ll b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-inline-asm.ll
index fa3466047ae1..8cdb0696f7ac 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-inline-asm.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-inline-asm.ll
@@ -119,7 +119,7 @@ entry:
   ret i32 %0
 }
 
-define zeroext i8 @test_register_output_trunc(i8* %src) nounwind {
+define zeroext i8 @test_register_output_trunc(ptr %src) nounwind {
   ;
   ; CHECK-LABEL: name: test_register_output_trunc
   ; CHECK: bb.1.entry:
@@ -184,7 +184,7 @@ define void @test_input_imm() {
   ret void
 }
 
-define zeroext i8 @test_input_register(i8* %src) nounwind {
+define zeroext i8 @test_input_register(ptr %src) nounwind {
   ; CHECK-LABEL: name: test_input_register
   ; CHECK: bb.1.entry:
   ; CHECK-NEXT:   liveins: $x0
@@ -198,11 +198,11 @@ define zeroext i8 @test_input_register(i8* %src) nounwind {
   ; CHECK-NEXT:   $w0 = COPY [[ZEXT]](s32)
   ; CHECK-NEXT:   RET_ReallyLR implicit $w0
 entry:
-  %0 = tail call i8 asm "ldtrb ${0:w}, [$1]", "=r,r"(i8* %src) nounwind
+  %0 = tail call i8 asm "ldtrb ${0:w}, [$1]", "=r,r"(ptr %src) nounwind
   ret i8 %0
 }
 
-define i32 @test_memory_constraint(i32* %a) nounwind {
+define i32 @test_memory_constraint(ptr %a) nounwind {
   ; CHECK-LABEL: name: test_memory_constraint
   ; CHECK: bb.1 (%ir-block.0):
   ; CHECK-NEXT:   liveins: $x0
@@ -212,7 +212,7 @@ define i32 @test_memory_constraint(i32* %a) nounwind {
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY %1
   ; CHECK-NEXT:   $w0 = COPY [[COPY1]](s32)
   ; CHECK-NEXT:   RET_ReallyLR implicit $w0
-  %1 = tail call i32 asm "ldr $0, $1", "=r,*m"(i32* elementtype(i32) %a)
+  %1 = tail call i32 asm "ldr $0, $1", "=r,*m"(ptr elementtype(i32) %a)
   ret i32 %1
 }
 

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-invoke-probabilities.ll b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-invoke-probabilities.ll
index 473216e9f170..ca65bbb7cee3 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-invoke-probabilities.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-invoke-probabilities.ll
@@ -2,13 +2,13 @@
 
 %struct.foo = type { i64, i64, %struct.pluto, %struct.pluto }
 %struct.pluto = type { %struct.wombat }
-%struct.wombat = type { i32*, i32*, %struct.barney }
+%struct.wombat = type { ptr, ptr, %struct.barney }
 %struct.barney = type { %struct.widget }
-%struct.widget = type { i32* }
+%struct.widget = type { ptr }
 
 declare i32 @hoge(...)
 
-define void @pluto() align 2 personality i8* bitcast (i32 (...)* @hoge to i8*) {
+define void @pluto() align 2 personality ptr @hoge {
 ; CHECK-LABEL: @pluto
 ; CHECK: bb.1.bb
 ; CHECK: successors: %bb.2(0x00000000), %bb.3(0x80000000)
@@ -23,10 +23,10 @@ bb1:                                              ; preds = %bb
   unreachable
 
 bb2:                                              ; preds = %bb
-  %tmp = landingpad { i8*, i32 }
+  %tmp = landingpad { ptr, i32 }
           cleanup
-  %tmp3 = getelementptr inbounds %struct.foo, %struct.foo* undef, i64 0, i32 3, i32 0, i32 0
-  resume { i8*, i32 } %tmp
+  %tmp3 = getelementptr inbounds %struct.foo, ptr undef, i64 0, i32 3, i32 0, i32 0
+  resume { ptr, i32 } %tmp
 }
 
 declare void @spam()

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-load-metadata.ll b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-load-metadata.ll
index 9ec8c6b737cf..bd09e01d41fe 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-load-metadata.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-load-metadata.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
 ; RUN: llc -mtriple=aarch64-- -mcpu=falkor -O0 -aarch64-enable-atomic-cfg-tidy=0 -stop-after=irtranslator -global-isel -verify-machineinstrs %s -o - | FileCheck %s
 
-define i32 @load_invariant(i32* %ptr) {
+define i32 @load_invariant(ptr %ptr) {
   ; CHECK-LABEL: name: load_invariant
   ; CHECK: bb.1 (%ir-block.0):
   ; CHECK:   liveins: $x0
@@ -9,11 +9,11 @@ define i32 @load_invariant(i32* %ptr) {
   ; CHECK:   [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (invariant load (s32) from %ir.ptr)
   ; CHECK:   $w0 = COPY [[LOAD]](s32)
   ; CHECK:   RET_ReallyLR implicit $w0
-  %load = load i32, i32* %ptr, align 4, !invariant.load !0
+  %load = load i32, ptr %ptr, align 4, !invariant.load !0
   ret i32 %load
 }
 
-define i32 @load_volatile_invariant(i32* %ptr) {
+define i32 @load_volatile_invariant(ptr %ptr) {
   ; CHECK-LABEL: name: load_volatile_invariant
   ; CHECK: bb.1 (%ir-block.0):
   ; CHECK:   liveins: $x0
@@ -21,11 +21,11 @@ define i32 @load_volatile_invariant(i32* %ptr) {
   ; CHECK:   [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (volatile invariant load (s32) from %ir.ptr)
   ; CHECK:   $w0 = COPY [[LOAD]](s32)
   ; CHECK:   RET_ReallyLR implicit $w0
-  %load = load volatile i32, i32* %ptr, align 4, !invariant.load !0
+  %load = load volatile i32, ptr %ptr, align 4, !invariant.load !0
   ret i32 %load
 }
 
-define i32 @load_dereferenceable(i32* dereferenceable(4) %ptr) {
+define i32 @load_dereferenceable(ptr dereferenceable(4) %ptr) {
   ; CHECK-LABEL: name: load_dereferenceable
   ; CHECK: bb.1 (%ir-block.0):
   ; CHECK:   liveins: $x0
@@ -33,11 +33,11 @@ define i32 @load_dereferenceable(i32* dereferenceable(4) %ptr) {
   ; CHECK:   [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (dereferenceable load (s32) from %ir.ptr)
   ; CHECK:   $w0 = COPY [[LOAD]](s32)
   ; CHECK:   RET_ReallyLR implicit $w0
-  %load = load i32, i32* %ptr, align 4
+  %load = load i32, ptr %ptr, align 4
   ret i32 %load
 }
 
-define i32 @load_dereferenceable_invariant(i32* dereferenceable(4) %ptr) {
+define i32 @load_dereferenceable_invariant(ptr dereferenceable(4) %ptr) {
   ; CHECK-LABEL: name: load_dereferenceable_invariant
   ; CHECK: bb.1 (%ir-block.0):
   ; CHECK:   liveins: $x0
@@ -45,11 +45,11 @@ define i32 @load_dereferenceable_invariant(i32* dereferenceable(4) %ptr) {
   ; CHECK:   [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (dereferenceable invariant load (s32) from %ir.ptr)
   ; CHECK:   $w0 = COPY [[LOAD]](s32)
   ; CHECK:   RET_ReallyLR implicit $w0
-  %load = load i32, i32* %ptr, align 4, !invariant.load !0
+  %load = load i32, ptr %ptr, align 4, !invariant.load !0
   ret i32 %load
 }
 
-define i32 @load_nontemporal(i32* %ptr) {
+define i32 @load_nontemporal(ptr %ptr) {
   ; CHECK-LABEL: name: load_nontemporal
   ; CHECK: bb.1 (%ir-block.0):
   ; CHECK:   liveins: $x0
@@ -57,11 +57,11 @@ define i32 @load_nontemporal(i32* %ptr) {
   ; CHECK:   [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (non-temporal load (s32) from %ir.ptr)
   ; CHECK:   $w0 = COPY [[LOAD]](s32)
   ; CHECK:   RET_ReallyLR implicit $w0
-  %load = load i32, i32* %ptr, align 4, !nontemporal !0
+  %load = load i32, ptr %ptr, align 4, !nontemporal !0
   ret i32 %load
 }
 
-define i32 @load_falkor_strided_access(i32* %ptr) {
+define i32 @load_falkor_strided_access(ptr %ptr) {
   ; CHECK-LABEL: name: load_falkor_strided_access
   ; CHECK: bb.1 (%ir-block.0):
   ; CHECK:   liveins: $x0
@@ -69,7 +69,7 @@ define i32 @load_falkor_strided_access(i32* %ptr) {
   ; CHECK:   [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: ("aarch64-strided-access" load (s32) from %ir.ptr)
   ; CHECK:   $w0 = COPY [[LOAD]](s32)
   ; CHECK:   RET_ReallyLR implicit $w0
-  %load = load i32, i32* %ptr, align 4, !falkor.strided.access !0
+  %load = load i32, ptr %ptr, align 4, !falkor.strided.access !0
   ret i32 %load
 }
 

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-localescape.ll b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-localescape.ll
index 4de9da027033..91f4a480ccef 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-localescape.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-localescape.ll
@@ -15,9 +15,9 @@ define void @local_escape() {
   ; CHECK-NEXT:   RET_ReallyLR
   %a = alloca i32
   %b = alloca i32, i32 2
-  call void (...) @llvm.localescape(i32* %a, i32* %b)
-  store i32 42, i32* %a
-  store i32 13, i32* %b
+  call void (...) @llvm.localescape(ptr %a, ptr %b)
+  store i32 42, ptr %a
+  store i32 13, ptr %b
   ret void
 }
 
@@ -36,13 +36,13 @@ define void @local_escape_insert_point() {
   ; CHECK-NEXT:   RET_ReallyLR
   %a = alloca i32
   %b = alloca i32, i32 2
-  store i32 42, i32* %a
-  store i32 13, i32* %b
-  call void (...) @llvm.localescape(i32* %a, i32* null, i32* %b)
+  store i32 42, ptr %a
+  store i32 13, ptr %b
+  call void (...) @llvm.localescape(ptr %a, ptr null, ptr %b)
   ret void
 }
 
-declare void @foo([128 x i32]*)
+declare void @foo(ptr)
 
 ; Check a cast of an alloca
 define void @local_escape_strip_ptr_cast() {
@@ -51,17 +51,16 @@ define void @local_escape_strip_ptr_cast() {
   ; CHECK-NEXT:   LOCAL_ESCAPE <mcsymbol .Llocal_escape_strip_ptr_cast$frame_escape_0>, %stack.0.a
   ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 42
   ; CHECK-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.a
-  ; CHECK-NEXT:   G_STORE [[C]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %ir.cast)
+  ; CHECK-NEXT:   G_STORE [[C]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %ir.a)
   ; CHECK-NEXT:   ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
   ; CHECK-NEXT:   $x0 = COPY [[FRAME_INDEX]](p0)
   ; CHECK-NEXT:   BL @foo, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $x0
   ; CHECK-NEXT:   ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
   ; CHECK-NEXT:   RET_ReallyLR
   %a = alloca [128 x i32]
-  %cast = bitcast [128 x i32]* %a to i32*
-  store i32 42, i32* %cast
-  call void (...) @llvm.localescape(i32* %cast, i32* null)
-  call void @foo([128 x i32]* %a)
+  store i32 42, ptr %a
+  call void (...) @llvm.localescape(ptr %a, ptr null)
+  call void @foo(ptr %a)
   ret void
 }
 

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-max-address-space.ll b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-max-address-space.ll
index 82bb27a34048..8ff8349d1591 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-max-address-space.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-max-address-space.ll
@@ -3,8 +3,8 @@
 ; CHECK-LABEL: name: store_max_address_space
 ; CHECK: %0:_(p16777215) = COPY $x0
 ; CHECK: G_STORE %1(s32), %0(p16777215) :: (store (s32) into %ir.ptr, addrspace 16777215)
-define void @store_max_address_space(i32 addrspace(16777215)* %ptr) {
-  store i32 0, i32 addrspace(16777215)* %ptr
+define void @store_max_address_space(ptr addrspace(16777215) %ptr) {
+  store i32 0, ptr addrspace(16777215) %ptr
   ret void
 }
 
@@ -12,15 +12,15 @@ define void @store_max_address_space(i32 addrspace(16777215)* %ptr) {
 ; CHECK: %0:_(<2 x p16777215>) = COPY $q0
 ; CHECK: %1:_(p16777215) = G_EXTRACT_VECTOR_ELT %0(<2 x p16777215>), %2(s64)
 ; CHECK: %1(p16777215) :: (store (s32) into %ir.elt0, addrspace 16777215)
-define void @store_max_address_space_vector(<2 x i32 addrspace(16777215)*> %vptr) {
-  %elt0 = extractelement <2 x i32 addrspace(16777215)*> %vptr, i32 0
-  store i32 0, i32 addrspace(16777215)* %elt0
+define void @store_max_address_space_vector(<2 x ptr addrspace(16777215)> %vptr) {
+  %elt0 = extractelement <2 x ptr addrspace(16777215)> %vptr, i32 0
+  store i32 0, ptr addrspace(16777215) %elt0
   ret void
 }
 
 ; CHECK-LABEL: name: max_address_space_vector_max_num_elts
-; CHECK: %0:_(<65535 x p16777215>) = G_LOAD %1(p0) :: (volatile load (<65535 x p16777215>) from `<65535 x i32 addrspace(16777215)*>* undef`, align 524288)
+; CHECK: %0:_(<65535 x p16777215>) = G_LOAD %1(p0) :: (volatile load (<65535 x p16777215>) from `ptr undef`, align 524288)
 define void @max_address_space_vector_max_num_elts() {
-  %load = load volatile <65535 x i32 addrspace(16777215)*>, <65535 x i32 addrspace(16777215)*>* undef
+  %load = load volatile <65535 x ptr addrspace(16777215)>, ptr undef
   ret void
 }

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-memcpy-inline.ll b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-memcpy-inline.ll
index 12a8f819661c..fd12932cfd53 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-memcpy-inline.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-memcpy-inline.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
 ; RUN: llc -mtriple=aarch64-unknown-unknown -global-isel -global-isel-abort=1 -verify-machineinstrs -stop-after=irtranslator %s -o - | FileCheck %s
 
-define void @copy(i8* %dst, i8* %src) {
+define void @copy(ptr %dst, ptr %src) {
   ; CHECK-LABEL: name: copy
   ; CHECK: bb.1.entry:
   ; CHECK:   liveins: $x0, $x1
@@ -12,11 +12,11 @@ define void @copy(i8* %dst, i8* %src) {
   ; CHECK:   G_MEMCPY [[COPY]](p0), [[COPY1]](p0), [[ZEXT]](s64), 0 :: (store (s8) into %ir.dst), (load (s8) from %ir.src)
   ; CHECK:   RET_ReallyLR
 entry:
-  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dst, i8* %src, i32 4, i1 false)
+  call void @llvm.memcpy.p0.p0.i32(ptr %dst, ptr %src, i32 4, i1 false)
   ret void
 }
 
-define void @inline_copy(i8* %dst, i8* %src) {
+define void @inline_copy(ptr %dst, ptr %src) {
   ; CHECK-LABEL: name: inline_copy
   ; CHECK: bb.1.entry:
   ; CHECK:   liveins: $x0, $x1
@@ -27,11 +27,11 @@ define void @inline_copy(i8* %dst, i8* %src) {
   ; CHECK:   G_MEMCPY_INLINE [[COPY]](p0), [[COPY1]](p0), [[ZEXT]](s64) :: (store (s8) into %ir.dst), (load (s8) from %ir.src)
   ; CHECK:   RET_ReallyLR
 entry:
-  call void @llvm.memcpy.inline.p0i8.p0i8.i32(i8* %dst, i8* %src, i32 4, i1 false)
+  call void @llvm.memcpy.inline.p0.p0.i32(ptr %dst, ptr %src, i32 4, i1 false)
   ret void
 }
 
-define void @copy_volatile(i8* %dst, i8* %src) {
+define void @copy_volatile(ptr %dst, ptr %src) {
   ; CHECK-LABEL: name: copy_volatile
   ; CHECK: bb.1.entry:
   ; CHECK:   liveins: $x0, $x1
@@ -42,11 +42,11 @@ define void @copy_volatile(i8* %dst, i8* %src) {
   ; CHECK:   G_MEMCPY [[COPY]](p0), [[COPY1]](p0), [[ZEXT]](s64), 0 :: (volatile store (s8) into %ir.dst), (volatile load (s8) from %ir.src)
   ; CHECK:   RET_ReallyLR
 entry:
-  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dst, i8* %src, i32 4, i1 true)
+  call void @llvm.memcpy.p0.p0.i32(ptr %dst, ptr %src, i32 4, i1 true)
   ret void
 }
 
-define void @inline_copy_volatile(i8* %dst, i8* %src) {
+define void @inline_copy_volatile(ptr %dst, ptr %src) {
   ; CHECK-LABEL: name: inline_copy_volatile
   ; CHECK: bb.1.entry:
   ; CHECK:   liveins: $x0, $x1
@@ -57,11 +57,11 @@ define void @inline_copy_volatile(i8* %dst, i8* %src) {
   ; CHECK:   G_MEMCPY_INLINE [[COPY]](p0), [[COPY1]](p0), [[ZEXT]](s64) :: (volatile store (s8) into %ir.dst), (volatile load (s8) from %ir.src)
   ; CHECK:   RET_ReallyLR
 entry:
-  call void @llvm.memcpy.inline.p0i8.p0i8.i32(i8* %dst, i8* %src, i32 4, i1 true)
+  call void @llvm.memcpy.inline.p0.p0.i32(ptr %dst, ptr %src, i32 4, i1 true)
   ret void
 }
 
-define void @tail_copy(i8* %dst, i8* %src) {
+define void @tail_copy(ptr %dst, ptr %src) {
   ; CHECK-LABEL: name: tail_copy
   ; CHECK: bb.1.entry:
   ; CHECK:   liveins: $x0, $x1
@@ -72,11 +72,11 @@ define void @tail_copy(i8* %dst, i8* %src) {
   ; CHECK:   G_MEMCPY [[COPY]](p0), [[COPY1]](p0), [[ZEXT]](s64), 1 :: (store (s8) into %ir.dst), (load (s8) from %ir.src)
   ; CHECK:   RET_ReallyLR
 entry:
-  tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dst, i8* %src, i32 4, i1 false)
+  tail call void @llvm.memcpy.p0.p0.i32(ptr %dst, ptr %src, i32 4, i1 false)
   ret void
 }
 
-define void @tail_inline_copy(i8* %dst, i8* %src) {
+define void @tail_inline_copy(ptr %dst, ptr %src) {
   ; CHECK-LABEL: name: tail_inline_copy
   ; CHECK: bb.1.entry:
   ; CHECK:   liveins: $x0, $x1
@@ -87,11 +87,11 @@ define void @tail_inline_copy(i8* %dst, i8* %src) {
   ; CHECK:   G_MEMCPY_INLINE [[COPY]](p0), [[COPY1]](p0), [[ZEXT]](s64) :: (store (s8) into %ir.dst), (load (s8) from %ir.src)
   ; CHECK:   RET_ReallyLR
 entry:
-  tail call void @llvm.memcpy.inline.p0i8.p0i8.i32(i8* %dst, i8* %src, i32 4, i1 false)
+  tail call void @llvm.memcpy.inline.p0.p0.i32(ptr %dst, ptr %src, i32 4, i1 false)
   ret void
 }
 
-define void @tail_copy_volatile(i8* %dst, i8* %src) {
+define void @tail_copy_volatile(ptr %dst, ptr %src) {
   ; CHECK-LABEL: name: tail_copy_volatile
   ; CHECK: bb.1.entry:
   ; CHECK:   liveins: $x0, $x1
@@ -102,11 +102,11 @@ define void @tail_copy_volatile(i8* %dst, i8* %src) {
   ; CHECK:   G_MEMCPY [[COPY]](p0), [[COPY1]](p0), [[ZEXT]](s64), 1 :: (volatile store (s8) into %ir.dst), (volatile load (s8) from %ir.src)
   ; CHECK:   RET_ReallyLR
 entry:
-  tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dst, i8* %src, i32 4, i1 true)
+  tail call void @llvm.memcpy.p0.p0.i32(ptr %dst, ptr %src, i32 4, i1 true)
   ret void
 }
 
-define void @tail_inline_copy_volatile(i8* %dst, i8* %src) {
+define void @tail_inline_copy_volatile(ptr %dst, ptr %src) {
   ; CHECK-LABEL: name: tail_inline_copy_volatile
   ; CHECK: bb.1.entry:
   ; CHECK:   liveins: $x0, $x1
@@ -117,9 +117,9 @@ define void @tail_inline_copy_volatile(i8* %dst, i8* %src) {
   ; CHECK:   G_MEMCPY_INLINE [[COPY]](p0), [[COPY1]](p0), [[ZEXT]](s64) :: (volatile store (s8) into %ir.dst), (volatile load (s8) from %ir.src)
   ; CHECK:   RET_ReallyLR
 entry:
-  tail call void @llvm.memcpy.inline.p0i8.p0i8.i32(i8* %dst, i8* %src, i32 4, i1 true)
+  tail call void @llvm.memcpy.inline.p0.p0.i32(ptr %dst, ptr %src, i32 4, i1 true)
   ret void
 }
 
-declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture writeonly, i8* nocapture readonly, i32, i1) nounwind
-declare void @llvm.memcpy.inline.p0i8.p0i8.i32(i8* nocapture writeonly, i8* nocapture readonly, i32, i1) nounwind
+declare void @llvm.memcpy.p0.p0.i32(ptr nocapture writeonly, ptr nocapture readonly, i32, i1) nounwind
+declare void @llvm.memcpy.inline.p0.p0.i32(ptr nocapture writeonly, ptr nocapture readonly, i32, i1) nounwind

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-memfunc-undef.ll b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-memfunc-undef.ll
index 6b7d3396af8c..e96b6bf906da 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-memfunc-undef.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-memfunc-undef.ll
@@ -7,8 +7,7 @@ define void @memset() {
   ; CHECK-NEXT:   RET_ReallyLR
 entry:
   %buf = alloca [512 x i8], align 1
-  %ptr = getelementptr inbounds [512 x i8], [512 x i8]* %buf, i32 0, i32 0
-  call void @llvm.memset.p0i8.i32(i8* %ptr, i8 undef, i32 512, i1 false)
+  call void @llvm.memset.p0.i32(ptr %buf, i8 undef, i32 512, i1 false)
   ret void
 }
 
@@ -19,8 +18,7 @@ define void @memcpy() {
   ; CHECK-NEXT:   RET_ReallyLR
 entry:
   %buf = alloca [512 x i8], align 1
-  %ptr = getelementptr inbounds [512 x i8], [512 x i8]* %buf, i32 0, i32 0
-  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %ptr, i8* undef, i32 512, i1 false)
+  call void @llvm.memcpy.p0.p0.i32(ptr %buf, ptr undef, i32 512, i1 false)
   ret void
 }
 
@@ -31,11 +29,10 @@ define void @memmove() {
   ; CHECK-NEXT: RET_ReallyLR
 entry:
   %buf = alloca [512 x i8], align 1
-  %ptr = getelementptr inbounds [512 x i8], [512 x i8]* %buf, i32 0, i32 0
-  call void @llvm.memmove.p0i8.p0i8.i32(i8* %ptr, i8* undef, i32 512, i1 false)
+  call void @llvm.memmove.p0.p0.i32(ptr %buf, ptr undef, i32 512, i1 false)
   ret void
 }
 
-declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i1) nounwind
-declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture writeonly, i8* nocapture readonly, i32, i1) nounwind
-declare void @llvm.memmove.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i1) nounwind
+declare void @llvm.memset.p0.i32(ptr nocapture, i8, i32, i1) nounwind
+declare void @llvm.memcpy.p0.p0.i32(ptr nocapture writeonly, ptr nocapture readonly, i32, i1) nounwind
+declare void @llvm.memmove.p0.p0.i32(ptr nocapture, ptr nocapture, i32, i1) nounwind

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-no-op-intrinsics.ll b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-no-op-intrinsics.ll
index 8341125df7b7..674ef9f1cffa 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-no-op-intrinsics.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-no-op-intrinsics.ll
@@ -13,7 +13,7 @@ define i64 @expect_i64(i64 %arg0) {
   ret i64 %expval
 }
 
-define i8* @ptr_annotate(i8* %arg0, i8* %arg1, i8* %arg2, i32 %arg3) {
+define ptr @ptr_annotate(ptr %arg0, ptr %arg1, ptr %arg2, i32 %arg3) {
   ; CHECK-LABEL: name: ptr_annotate
   ; CHECK: bb.1 (%ir-block.0):
   ; CHECK:   liveins: $w3, $x0, $x1, $x2
@@ -24,8 +24,8 @@ define i8* @ptr_annotate(i8* %arg0, i8* %arg1, i8* %arg2, i32 %arg3) {
   ; CHECK:   [[COPY4:%[0-9]+]]:_(p0) = COPY [[COPY]](p0)
   ; CHECK:   $x0 = COPY [[COPY4]](p0)
   ; CHECK:   RET_ReallyLR implicit $x0
-  %call = call i8* @llvm.ptr.annotation.p0i8(i8* %arg0, i8* %arg1, i8* %arg2, i32 %arg3, i8* null)
-  ret i8* %call
+  %call = call ptr @llvm.ptr.annotation.p0(ptr %arg0, ptr %arg1, ptr %arg2, i32 %arg3, ptr null)
+  ret ptr %call
 }
 
 @.str = private unnamed_addr constant [4 x i8] c"sth\00", section "llvm.metadata"
@@ -39,11 +39,11 @@ define i32 @annotation(i32 %a) {
   ; CHECK:   [[COPY1:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
   ; CHECK:   $w0 = COPY [[COPY1]](s32)
   ; CHECK:   RET_ReallyLR implicit $w0
-  %call = call i32 @llvm.annotation.i32(i32 %a, i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str, i32 0, i32 0), i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str1, i32 0, i32 0), i32 2)
+  %call = call i32 @llvm.annotation.i32(i32 %a, ptr @.str, ptr @.str1, i32 2)
   ret i32 %call
 }
 
-define i8* @launder_invariant_group(i8* %p) {
+define ptr @launder_invariant_group(ptr %p) {
   ; CHECK-LABEL: name: launder_invariant_group
   ; CHECK: bb.1 (%ir-block.0):
   ; CHECK:   liveins: $x0
@@ -51,11 +51,11 @@ define i8* @launder_invariant_group(i8* %p) {
   ; CHECK:   [[COPY1:%[0-9]+]]:_(p0) = COPY [[COPY]](p0)
   ; CHECK:   $x0 = COPY [[COPY1]](p0)
   ; CHECK:   RET_ReallyLR implicit $x0
-  %q = call i8* @llvm.launder.invariant.group.p0i8(i8* %p)
-  ret i8* %q
+  %q = call ptr @llvm.launder.invariant.group.p0(ptr %p)
+  ret ptr %q
 }
 
-define i8* @strip_invariant_group(i8* %p) {
+define ptr @strip_invariant_group(ptr %p) {
   ; CHECK-LABEL: name: strip_invariant_group
   ; CHECK: bb.1 (%ir-block.0):
   ; CHECK:   liveins: $x0
@@ -63,15 +63,15 @@ define i8* @strip_invariant_group(i8* %p) {
   ; CHECK:   [[COPY1:%[0-9]+]]:_(p0) = COPY [[COPY]](p0)
   ; CHECK:   $x0 = COPY [[COPY1]](p0)
   ; CHECK:   RET_ReallyLR implicit $x0
-  %q = call i8* @llvm.strip.invariant.group.p0i8(i8* %p)
-  ret i8* %q
+  %q = call ptr @llvm.strip.invariant.group.p0(ptr %p)
+  ret ptr %q
 }
 
 declare i64 @llvm.expect.i64(i64, i64) #0
-declare i8* @llvm.ptr.annotation.p0i8(i8*, i8*, i8*, i32, i8*) #1
-declare i32 @llvm.annotation.i32(i32, i8*, i8*, i32) #1
-declare i8* @llvm.launder.invariant.group.p0i8(i8*) #2
-declare i8* @llvm.strip.invariant.group.p0i8(i8*) #3
+declare ptr @llvm.ptr.annotation.p0(ptr, ptr, ptr, i32, ptr) #1
+declare i32 @llvm.annotation.i32(i32, ptr, ptr, i32) #1
+declare ptr @llvm.launder.invariant.group.p0(ptr) #2
+declare ptr @llvm.strip.invariant.group.p0(ptr) #3
 
 attributes #0 = { nounwind readnone willreturn }
 attributes #1 = { nounwind willreturn }

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-no-unwind-inline-asm.ll b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-no-unwind-inline-asm.ll
index 113ad30c9501..7f008bccd6ab 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-no-unwind-inline-asm.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-no-unwind-inline-asm.ll
@@ -10,7 +10,7 @@ entry:
   unreachable
 }
 
-define dso_local void @test() personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+define dso_local void @test() personality ptr @__gxx_personality_v0 {
 entry:
 
 ; CHECK-LABEL: name: test
@@ -30,13 +30,13 @@ lpad:
 ; CHECK: bb.3.lpad
 ; CHECK: EH_LABEL
 
-  %0 = landingpad { i8*, i32 }
+  %0 = landingpad { ptr, i32 }
           cleanup
-  call void (i8*, ...) @printf(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @.str.2, i64 0, i64 0))
-  resume { i8*, i32 } %0
+  call void (ptr, ...) @printf(ptr @.str.2)
+  resume { ptr, i32 } %0
 
 }
 
 declare dso_local i32 @__gxx_personality_v0(...)
 
-declare dso_local void @printf(i8*, ...)
+declare dso_local void @printf(ptr, ...)

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-one-by-n-vector-ptr-add.ll b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-one-by-n-vector-ptr-add.ll
index 849fb0163785..870f893cbef3 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-one-by-n-vector-ptr-add.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-one-by-n-vector-ptr-add.ll
@@ -6,7 +6,7 @@
 ; We should not create a splat vector for the non-vector index on this
 ; getelementptr. The entire getelementptr should be translated to a scalar
 ; G_PTR_ADD.
-define <1 x i8*> @one_elt_vector_ptr_add_non_vector_idx(<1 x i8*> %vec) {
+define <1 x ptr> @one_elt_vector_ptr_add_non_vector_idx(<1 x ptr> %vec) {
   ; CHECK-LABEL: name: one_elt_vector_ptr_add_non_vector_idx
   ; CHECK: bb.1 (%ir-block.0):
   ; CHECK:   liveins: $d0
@@ -18,14 +18,14 @@ define <1 x i8*> @one_elt_vector_ptr_add_non_vector_idx(<1 x i8*> %vec) {
   ; CHECK:   [[COPY2:%[0-9]+]]:_(p0) = COPY [[PTR_ADD]](p0)
   ; CHECK:   $d0 = COPY [[COPY2]](p0)
   ; CHECK:   RET_ReallyLR implicit $d0
-  %ptr_add = getelementptr i8, <1 x i8*> %vec, <1 x i32> <i32 1>
-  ret <1 x i8*> %ptr_add
+  %ptr_add = getelementptr i8, <1 x ptr> %vec, <1 x i32> <i32 1>
+  ret <1 x ptr> %ptr_add
 }
 
 ; We should not create a splat vector for the non-vector pointer on this
 ; getelementptr. The entire getelementptr should be translated to a scalar
 ; G_PTR_ADD.
-define <1 x i8*> @one_elt_vector_ptr_add_non_vector_ptr(i8* %vec) {
+define <1 x ptr> @one_elt_vector_ptr_add_non_vector_ptr(ptr %vec) {
   ; CHECK-LABEL: name: one_elt_vector_ptr_add_non_vector_ptr
   ; CHECK: bb.1 (%ir-block.0):
   ; CHECK:   liveins: $x0
@@ -37,6 +37,6 @@ define <1 x i8*> @one_elt_vector_ptr_add_non_vector_ptr(i8* %vec) {
   ; CHECK:   [[COPY2:%[0-9]+]]:_(p0) = COPY [[PTR_ADD]](p0)
   ; CHECK:   $d0 = COPY [[COPY2]](p0)
   ; CHECK:   RET_ReallyLR implicit $d0
-  %ptr_add = getelementptr i8, i8* %vec, <1 x i32> <i32 1>
-  ret <1 x i8*> %ptr_add
+  %ptr_add = getelementptr i8, ptr %vec, <1 x i32> <i32 1>
+  ret <1 x ptr> %ptr_add
 }

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-stack-objects.ll b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-stack-objects.ll
index b80af251f418..78b30b61574b 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-stack-objects.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-stack-objects.ll
@@ -11,7 +11,7 @@
 ; CHECK:  - { id: 1, type: default, offset: 0, size: 8, alignment: 16, stack-id: default,
 ; CHECK-NEXT: isImmutable: true, isAliased: false,
 define void @stack_passed_i64(i64 %arg, i64 %arg1, i64 %arg2, i64 %arg3, i64 %arg4, i64 %arg5, i64 %arg6,
-                              i64 %arg7, i64 %arg8, i64* byval(i64) %arg9) {
+                              i64 %arg7, i64 %arg8, ptr byval(i64) %arg9) {
   ; CHECK: bb.1 (%ir-block.0):
   ; CHECK:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
   ; CHECK:   [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load (s64)  from %fixed-stack.1, align 16)
@@ -21,8 +21,8 @@ define void @stack_passed_i64(i64 %arg, i64 %arg1, i64 %arg2, i64 %arg3, i64 %ar
   ; CHECK:   [[ADD:%[0-9]+]]:_(s64) = G_ADD [[LOAD1]], [[LOAD]]
   ; CHECK:   G_STORE [[ADD]](s64), [[COPY8]](p0) :: (volatile store (s64) into %ir.arg9)
   ; CHECK:   RET_ReallyLR
-  %load = load i64, i64* %arg9
+  %load = load i64, ptr %arg9
   %add = add i64 %load, %arg8
-  store volatile i64 %add, i64* %arg9
+  store volatile i64 %add, ptr %arg9
   ret void
 }

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-stack-protector-windows.ll b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-stack-protector-windows.ll
index 7ce80afb8c7f..6aefc5341da0 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-stack-protector-windows.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-stack-protector-windows.ll
@@ -30,9 +30,8 @@ define void @caller() sspreq {
 ; CHECK-NEXT:    .seh_endproc
 entry:
   %x = alloca i32, align 4
-  %0 = bitcast i32* %x to i8*
-  call void @callee(i32* nonnull %x)
+  call void @callee(ptr nonnull %x)
   ret void
 }
 
-declare void @callee(i32*)
+declare void @callee(ptr)

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-store-metadata.ll b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-store-metadata.ll
index 10e394dda64b..f9f92b9e2190 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-store-metadata.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-store-metadata.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
 ; RUN: llc -mtriple=aarch64-- -mcpu=falkor -O0 -aarch64-enable-atomic-cfg-tidy=0 -stop-after=irtranslator -global-isel -verify-machineinstrs %s -o - | FileCheck %s
 
-define void @store_nontemporal(i32* dereferenceable(4) %ptr) {
+define void @store_nontemporal(ptr dereferenceable(4) %ptr) {
   ; CHECK-LABEL: name: store_nontemporal
   ; CHECK: bb.1 (%ir-block.0):
   ; CHECK:   liveins: $x0
@@ -9,11 +9,11 @@ define void @store_nontemporal(i32* dereferenceable(4) %ptr) {
   ; CHECK:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
   ; CHECK:   G_STORE [[C]](s32), [[COPY]](p0) :: (non-temporal store (s32) into %ir.ptr)
   ; CHECK:   RET_ReallyLR
-  store i32 0, i32* %ptr, align 4, !nontemporal !0
+  store i32 0, ptr %ptr, align 4, !nontemporal !0
   ret void
 }
 
-define void @store_dereferenceable(i32* dereferenceable(4) %ptr) {
+define void @store_dereferenceable(ptr dereferenceable(4) %ptr) {
   ; CHECK-LABEL: name: store_dereferenceable
   ; CHECK: bb.1 (%ir-block.0):
   ; CHECK:   liveins: $x0
@@ -21,11 +21,11 @@ define void @store_dereferenceable(i32* dereferenceable(4) %ptr) {
   ; CHECK:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
   ; CHECK:   G_STORE [[C]](s32), [[COPY]](p0) :: (store (s32) into %ir.ptr)
   ; CHECK:   RET_ReallyLR
-  store i32 0, i32* %ptr, align 4
+  store i32 0, ptr %ptr, align 4
   ret void
 }
 
-define void @store_volatile_dereferenceable(i32* dereferenceable(4) %ptr) {
+define void @store_volatile_dereferenceable(ptr dereferenceable(4) %ptr) {
   ; CHECK-LABEL: name: store_volatile_dereferenceable
   ; CHECK: bb.1 (%ir-block.0):
   ; CHECK:   liveins: $x0
@@ -33,11 +33,11 @@ define void @store_volatile_dereferenceable(i32* dereferenceable(4) %ptr) {
   ; CHECK:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
   ; CHECK:   G_STORE [[C]](s32), [[COPY]](p0) :: (volatile store (s32) into %ir.ptr)
   ; CHECK:   RET_ReallyLR
-  store volatile i32 0, i32* %ptr, align 4
+  store volatile i32 0, ptr %ptr, align 4
   ret void
 }
 
-define void @store_falkor_strided_access(i32* %ptr) {
+define void @store_falkor_strided_access(ptr %ptr) {
   ; CHECK-LABEL: name: store_falkor_strided_access
   ; CHECK: bb.1 (%ir-block.0):
   ; CHECK:   liveins: $x0
@@ -45,7 +45,7 @@ define void @store_falkor_strided_access(i32* %ptr) {
   ; CHECK:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
   ; CHECK:   G_STORE [[C]](s32), [[COPY]](p0) :: ("aarch64-strided-access" store (s32) into %ir.ptr)
   ; CHECK:   RET_ReallyLR
-  store i32 0, i32* %ptr, align 4, !falkor.strided.access !0
+  store i32 0, ptr %ptr, align 4, !falkor.strided.access !0
   ret void
 }
 

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-switch-bittest.ll b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-switch-bittest.ll
index f4939c3d8604..a39f8542afa0 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-switch-bittest.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-switch-bittest.ll
@@ -229,7 +229,7 @@ define void @bit_test_block_incomplete_phi() {
   ; CHECK: bb.3.if.end:
   ; CHECK-NEXT:   successors: %bb.4(0x80000000)
   ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[DEF1]](p0) :: (load (p0) from `i8** undef`)
+  ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[DEF1]](p0) :: (load (p0) from `ptr undef`)
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.4.return:
   ; CHECK-NEXT:   [[PHI:%[0-9]+]]:_(s1) = G_PHI [[C]](s1), %bb.3, [[C1]](s1), %bb.5
@@ -253,7 +253,7 @@ sw.epilog.i:                                      ; preds = %entry
   unreachable
 
 if.end:                                           ; preds = %entry, %entry, %entry, %entry, %entry, %entry, %entry
-  %0 = load i8*, i8** undef, align 8
+  %0 = load ptr, ptr undef, align 8
   br label %return
 
 return:                                           ; preds = %if.end, %entry, %entry, %entry, %entry

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-tbaa.ll b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-tbaa.ll
index 2f4341049216..3daeebe27a2a 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-tbaa.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-tbaa.ll
@@ -4,11 +4,10 @@ target datalayout = "e-m:o-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16
 
 define void @snork() {
 bb:
-  %tmp1 = getelementptr i16, i16* null, i64 0
-  %tmp5 = getelementptr i16, i16* null, i64 2
-  %tmp6 = load i16, i16* %tmp1, align 2, !tbaa !0
-  store i16 %tmp6, i16* %tmp5, align 2, !tbaa !0
-  ; CHECK: [[LOAD:%[0-9]+]]:_(s16) = G_LOAD %{{[0-9]+}}(p0) :: (load (s16) from %ir.tmp1, !tbaa !0)
+  %tmp5 = getelementptr i16, ptr null, i64 2
+  %tmp6 = load i16, ptr null, align 2, !tbaa !0
+  store i16 %tmp6, ptr %tmp5, align 2, !tbaa !0
+  ; CHECK: [[LOAD:%[0-9]+]]:_(s16) = G_LOAD %{{[0-9]+}}(p0) :: (load (s16) from `ptr null`, !tbaa !0)
   ; CHECK: G_STORE [[LOAD]](s16), %{{[0-9]+}}(p0) :: (store (s16) into %ir.tmp5, !tbaa !0)
   ret void
 }

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-unwind-inline-asm.ll b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-unwind-inline-asm.ll
index 8cb6309a10af..ae63835d5251 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-unwind-inline-asm.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-unwind-inline-asm.ll
@@ -13,13 +13,12 @@ entry:
   unreachable
 }
 
-define dso_local void @test() personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+define dso_local void @test() personality ptr @__gxx_personality_v0 {
   ; CHECK-LABEL: name: test
   ; CHECK: bb.1.entry:
   ; CHECK-NEXT:   successors: %bb.2(0x40000000), %bb.3(0x40000000)
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @.str.2
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY [[GV]](p0)
   ; CHECK-NEXT:   EH_LABEL <mcsymbol >
   ; CHECK-NEXT:   INLINEASM &"bl trap", 1 /* sideeffect attdialect */
   ; CHECK-NEXT:   EH_LABEL <mcsymbol >
@@ -33,15 +32,15 @@ define dso_local void @test() personality i8* bitcast (i32 (...)* @__gxx_persona
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   EH_LABEL <mcsymbol >
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(s128) = G_IMPLICIT_DEF
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(p0) = COPY $x0
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(p0) = COPY $x1
-  ; CHECK-NEXT:   [[PTRTOINT:%[0-9]+]]:_(s32) = G_PTRTOINT [[COPY2]](p0)
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
+  ; CHECK-NEXT:   [[PTRTOINT:%[0-9]+]]:_(s32) = G_PTRTOINT [[COPY1]](p0)
   ; CHECK-NEXT:   ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
-  ; CHECK-NEXT:   $x0 = COPY [[COPY]](p0)
+  ; CHECK-NEXT:   $x0 = COPY [[GV]](p0)
   ; CHECK-NEXT:   BL @printf, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $x0
   ; CHECK-NEXT:   ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
   ; CHECK-NEXT:   ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
-  ; CHECK-NEXT:   $x0 = COPY [[COPY1]](p0)
+  ; CHECK-NEXT:   $x0 = COPY [[COPY]](p0)
   ; CHECK-NEXT:   BL @_Unwind_Resume, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $x0
   ; CHECK-NEXT:   ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
 entry:
@@ -55,14 +54,14 @@ invoke.cont:
 
 lpad:
 
-  %0 = landingpad { i8*, i32 }
+  %0 = landingpad { ptr, i32 }
           cleanup
-  call void (i8*, ...) @printf(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @.str.2, i64 0, i64 0))
-  resume { i8*, i32 } %0
+  call void (ptr, ...) @printf(ptr @.str.2)
+  resume { ptr, i32 } %0
 
 }
 
-define void @test2() #0 personality i32 (...)* @__gcc_personality_v0 {
+define void @test2() #0 personality ptr @__gcc_personality_v0 {
   ; CHECK-LABEL: name: test2
   ; CHECK: bb.1 (%ir-block.0):
   ; CHECK-NEXT:   successors: %bb.2(0x40000000), %bb.3(0x40000000)
@@ -87,15 +86,15 @@ define void @test2() #0 personality i32 (...)* @__gcc_personality_v0 {
   ; CHECK-NEXT:   $x0 = COPY [[COPY1]](p0)
   ; CHECK-NEXT:   BL @_Unwind_Resume, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $x0
   ; CHECK-NEXT:   ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
-  invoke void asm sideeffect "", "r"(i64* undef) to label %a unwind label %b
+  invoke void asm sideeffect "", "r"(ptr undef) to label %a unwind label %b
 a:
   ret void
 b:
-  %landing_pad = landingpad { i8*, i32 } cleanup
-  resume { i8*, i32 } %landing_pad
+  %landing_pad = landingpad { ptr, i32 } cleanup
+  resume { ptr, i32 } %landing_pad
 }
 
 declare i32 @__gcc_personality_v0(...)
 declare dso_local i32 @__gxx_personality_v0(...)
 
-declare dso_local void @printf(i8*, ...)
+declare dso_local void @printf(ptr, ...)

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-volatile-load-pr36018.ll b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-volatile-load-pr36018.ll
index 099a9e71e42d..aa4ed0ea04ec 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-volatile-load-pr36018.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-volatile-load-pr36018.ll
@@ -7,7 +7,7 @@ declare void @bar(i32)
 define hidden void @foo() {
 ; CHECK-NOT: ldrh
 ; CHECK: ldrsh
-  %1 = load volatile i16, i16* @g, align 2
+  %1 = load volatile i16, ptr @g, align 2
   %2 = sext i16 %1 to i32
   call void @bar(i32 %2)
   ret void

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-weird-alloca-size.ll b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-weird-alloca-size.ll
index 741bae3d7570..0a632d45a91c 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-weird-alloca-size.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-weird-alloca-size.ll
@@ -12,8 +12,8 @@
 ; CHECK-NEXT: - { id: 0, name: stack_slot, type: default, offset: 0, size: 4, alignment: 4
 define void @foo() {
   %stack_slot = alloca i19
-  call void @bar(i19* %stack_slot)
+  call void @bar(ptr %stack_slot)
   ret void
 }
 
-declare void @bar(i19* %a)
+declare void @bar(ptr %a)

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-exceptions.ll b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-exceptions.ll
index 8ad1dbc695fd..0f43625ec166 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-exceptions.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-exceptions.ll
@@ -1,14 +1,14 @@
 ; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
 ; RUN: llc -O0 -mtriple=aarch64-apple-ios -verify-machineinstrs -global-isel -stop-after=legalizer %s -o - | FileCheck %s
 
-@_ZTIi = external global i8*
+@_ZTIi = external global ptr
 
 declare i32 @foo(i32)
 declare i32 @__gxx_personality_v0(...)
-declare i32 @llvm.eh.typeid.for(i8*)
-declare void @_Unwind_Resume(i8*)
+declare i32 @llvm.eh.typeid.for(ptr)
+declare void @_Unwind_Resume(ptr)
 
-define void @bar() personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+define void @bar() personality ptr @__gxx_personality_v0 {
   ; CHECK-LABEL: name: bar
   ; CHECK: bb.1 (%ir-block.0):
   ; CHECK-NEXT:   successors: %bb.3(0x40000000), %bb.2(0x40000000)
@@ -47,23 +47,23 @@ define void @bar() personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to
   ; CHECK-NEXT:   BL @_Unwind_Resume, csr_darwin_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $x0
   ; CHECK-NEXT:   ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
   ; CHECK-NEXT:   G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
-  %exn.slot = alloca i8*
+  %exn.slot = alloca ptr
   %ehselector.slot = alloca i32
   %1 = invoke i32 @foo(i32 42) to label %continue unwind label %cleanup
 
 cleanup:
-  %2 = landingpad { i8*, i32 } cleanup
-  %3 = extractvalue { i8*, i32 } %2, 0
-  store i8* %3, i8** %exn.slot, align 8
-  %4 = extractvalue { i8*, i32 } %2, 1
-  store i32 %4, i32* %ehselector.slot, align 4
+  %2 = landingpad { ptr, i32 } cleanup
+  %3 = extractvalue { ptr, i32 } %2, 0
+  store ptr %3, ptr %exn.slot, align 8
+  %4 = extractvalue { ptr, i32 } %2, 1
+  store i32 %4, ptr %ehselector.slot, align 4
   br label %eh.resume
 
 continue:
   ret void
 
 eh.resume:
-  %exn = load i8*, i8** %exn.slot, align 8
-  call void @_Unwind_Resume(i8* %exn)
+  %exn = load ptr, ptr %exn.slot, align 8
+  call void @_Unwind_Resume(ptr %exn)
   unreachable
 }

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/localizer-arm64-tti.ll b/llvm/test/CodeGen/AArch64/GlobalISel/localizer-arm64-tti.ll
index a4638d87a08c..ac4590e6e83b 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/localizer-arm64-tti.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/localizer-arm64-tti.ll
@@ -50,15 +50,15 @@ define i32 @foo() {
   ; CHECK-NEXT:   $w0 = COPY [[C6]](s32)
   ; CHECK-NEXT:   RET_ReallyLR implicit $w0
 entry:
-  %0 = load i32, i32* @var1, align 4
+  %0 = load i32, ptr @var1, align 4
   %cmp = icmp eq i32 %0, 1
   br i1 %cmp, label %if.then, label %if.end
 
 if.then:
-  store i32 2, i32* @var2, align 4
-  store i32 3, i32* @var1, align 4
-  store i32 2, i32* @var3, align 4
-  store i32 3, i32* @var1, align 4
+  store i32 2, ptr @var2, align 4
+  store i32 3, ptr @var1, align 4
+  store i32 2, ptr @var3, align 4
+  store i32 3, ptr @var1, align 4
   br label %if.end
 
 if.end:
@@ -101,13 +101,13 @@ define i32 @darwin_tls() {
   ; CHECK-NEXT:   $w0 = COPY [[C2]](s32)
   ; CHECK-NEXT:   RET_ReallyLR implicit $w0
 entry:
-  %0 = load i32, i32* @var1, align 4
+  %0 = load i32, ptr @var1, align 4
   %cmp = icmp eq i32 %0, 1
   br i1 %cmp, label %if.then, label %if.end
 
 if.then:
-  %tls = load i32, i32* @tls_gv, align 4
-  store i32 %tls, i32* @var2, align 4
+  %tls = load i32, ptr @tls_gv, align 4
+  store i32 %tls, ptr @var2, align 4
   br label %if.end
 
 if.end:
@@ -152,21 +152,21 @@ define i32 @imm_cost_too_large_cost_of_2() {
   ; CHECK-NEXT:   $w0 = COPY [[C3]](s32)
   ; CHECK-NEXT:   RET_ReallyLR implicit $w0
 entry:
-  %0 = load i32, i32* @var1, align 4
+  %0 = load i32, ptr @var1, align 4
   %cst1 = bitcast i32 -2228259 to i32
   %cmp = icmp eq i32 %0, 1
   br i1 %cmp, label %if.then, label %if.end
 
 if.then:
-  store i32 %cst1, i32* @var2
+  store i32 %cst1, ptr @var2
   br label %if.then2
 
 if.then2:
-  store i32 %cst1, i32* @var1
+  store i32 %cst1, ptr @var1
   br label %if.end
 
 if.end:
-  store i32 %cst1, i32* @var3
+  store i32 %cst1, ptr @var3
   ret i32 0
 }
 
@@ -209,21 +209,21 @@ define i64 @imm_cost_too_large_cost_of_4() {
   ; CHECK-NEXT:   $x0 = COPY [[C4]](s64)
   ; CHECK-NEXT:   RET_ReallyLR implicit $x0
 entry:
-  %0 = load i64, i64* @var1_64, align 4
+  %0 = load i64, ptr @var1_64, align 4
   %cst1 = bitcast i64 -2228259 to i64
   %cmp = icmp eq i64 %0, 1
   br i1 %cmp, label %if.then, label %if.end
 
 if.then:
-  store i64 %cst1, i64* @var2_64
+  store i64 %cst1, ptr @var2_64
   br label %if.then2
 
 if.then2:
-  store i64 %cst1, i64* @var1_64
+  store i64 %cst1, ptr @var1_64
   br label %if.end
 
 if.end:
-  store i64 %cst1, i64* @var3_64
+  store i64 %cst1, ptr @var3_64
   ret i64 0
 }
 

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/memcpy_chk_no_tail.ll b/llvm/test/CodeGen/AArch64/GlobalISel/memcpy_chk_no_tail.ll
index 68d833338ad6..263dfbdc139b 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/memcpy_chk_no_tail.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/memcpy_chk_no_tail.ll
@@ -9,20 +9,18 @@ target triple = "arm64-apple-ios13.0.0"
 ; CHECK-LABEL: @usqrt
 ; CHECK-NOT: b memcpy
 ; CHECK: bl _memcpy
-define void @usqrt(i32 %x, %struct.int_sqrt* %q) local_unnamed_addr #0 {
+define void @usqrt(i32 %x, ptr %q) local_unnamed_addr #0 {
   %a = alloca i32, align 4
-  %bc = bitcast i32* %a to i8*
-  %bc2 = bitcast %struct.int_sqrt* %q to i8*
-  %obj = tail call i64 @llvm.objectsize.i64.p0i8(i8* %bc2, i1 false, i1 true, i1 false)
-  %call = call i8* @__memcpy_chk(i8* %bc2, i8* nonnull %bc, i64 1000, i64 %obj) #4
+  %obj = tail call i64 @llvm.objectsize.i64.p0(ptr %q, i1 false, i1 true, i1 false)
+  %call = call ptr @__memcpy_chk(ptr %q, ptr nonnull %a, i64 1000, i64 %obj) #4
   ret void
 }
 
 ; Function Attrs: nofree nounwind optsize
-declare i8* @__memcpy_chk(i8*, i8*, i64, i64) local_unnamed_addr #2
+declare ptr @__memcpy_chk(ptr, ptr, i64, i64) local_unnamed_addr #2
 
 ; Function Attrs: nounwind readnone speculatable willreturn
-declare i64 @llvm.objectsize.i64.p0i8(i8*, i1 immarg, i1 immarg, i1 immarg) #3
+declare i64 @llvm.objectsize.i64.p0(ptr, i1 immarg, i1 immarg, i1 immarg) #3
 attributes #0 = { optsize "disable-tail-calls"="false" "frame-pointer"="all" }
 attributes #2 = { nofree nounwind "disable-tail-calls"="false" "frame-pointer"="all" }
 attributes #3 = { nounwind readnone speculatable willreturn }

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/merge-stores-truncating.ll b/llvm/test/CodeGen/AArch64/GlobalISel/merge-stores-truncating.ll
index 80e7ab94575f..ff4044be8e35 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/merge-stores-truncating.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/merge-stores-truncating.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=aarch64-apple-ios -global-isel -global-isel-abort=1 | FileCheck %s
 
-define dso_local void @trunc_i16_to_i8(i16 %x, i8* %p) {
+define dso_local void @trunc_i16_to_i8(i16 %x, ptr %p) {
 ; CHECK-LABEL: trunc_i16_to_i8:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    strh w0, [x1]
@@ -9,13 +9,13 @@ define dso_local void @trunc_i16_to_i8(i16 %x, i8* %p) {
   %t1 = trunc i16 %x to i8
   %sh = lshr i16 %x, 8
   %t2 = trunc i16 %sh to i8
-  store i8 %t1, i8* %p, align 1
-  %p1 = getelementptr inbounds i8, i8* %p, i64 1
-  store i8 %t2, i8* %p1, align 1
+  store i8 %t1, ptr %p, align 1
+  %p1 = getelementptr inbounds i8, ptr %p, i64 1
+  store i8 %t2, ptr %p1, align 1
   ret void
 }
 
-define dso_local void @trunc_i32_to_i8(i32 %x, i8* %p) {
+define dso_local void @trunc_i32_to_i8(i32 %x, ptr %p) {
 ; CHECK-LABEL: trunc_i32_to_i8:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    str w0, [x1]
@@ -27,17 +27,17 @@ define dso_local void @trunc_i32_to_i8(i32 %x, i8* %p) {
   %t3 = trunc i32 %sh2 to i8
   %sh3 = lshr i32 %x, 24
   %t4 = trunc i32 %sh3 to i8
-  store i8 %t1, i8* %p, align 1
-  %p1 = getelementptr inbounds i8, i8* %p, i64 1
-  store i8 %t2, i8* %p1, align 1
-  %p2 = getelementptr inbounds i8, i8* %p, i64 2
-  store i8 %t3, i8* %p2, align 1
-  %p3 = getelementptr inbounds i8, i8* %p, i64 3
-  store i8 %t4, i8* %p3, align 1
+  store i8 %t1, ptr %p, align 1
+  %p1 = getelementptr inbounds i8, ptr %p, i64 1
+  store i8 %t2, ptr %p1, align 1
+  %p2 = getelementptr inbounds i8, ptr %p, i64 2
+  store i8 %t3, ptr %p2, align 1
+  %p3 = getelementptr inbounds i8, ptr %p, i64 3
+  store i8 %t4, ptr %p3, align 1
   ret void
 }
 
-define dso_local void @trunc_i32_to_i16(i32 %x, i16* %p) {
+define dso_local void @trunc_i32_to_i16(i32 %x, ptr %p) {
 ; CHECK-LABEL: trunc_i32_to_i16:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    str w0, [x1]
@@ -45,13 +45,13 @@ define dso_local void @trunc_i32_to_i16(i32 %x, i16* %p) {
   %t1 = trunc i32 %x to i16
   %sh = lshr i32 %x, 16
   %t2 = trunc i32 %sh to i16
-  store i16 %t1, i16* %p, align 2
-  %p1 = getelementptr inbounds i16, i16* %p, i64 1
-  store i16 %t2, i16* %p1, align 2
+  store i16 %t1, ptr %p, align 2
+  %p1 = getelementptr inbounds i16, ptr %p, i64 1
+  store i16 %t2, ptr %p1, align 2
   ret void
 }
 
-define dso_local void @be_i32_to_i16(i32 %x, i16* %p0) {
+define dso_local void @be_i32_to_i16(i32 %x, ptr %p0) {
 ; CHECK-LABEL: be_i32_to_i16:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ror w8, w0, #16
@@ -60,13 +60,13 @@ define dso_local void @be_i32_to_i16(i32 %x, i16* %p0) {
   %sh1 = lshr i32 %x, 16
   %t0 = trunc i32 %x to i16
   %t1 = trunc i32 %sh1 to i16
-  %p1 = getelementptr inbounds i16, i16* %p0, i64 1
-  store i16 %t0, i16* %p1, align 2
-  store i16 %t1, i16* %p0, align 2
+  %p1 = getelementptr inbounds i16, ptr %p0, i64 1
+  store i16 %t0, ptr %p1, align 2
+  store i16 %t1, ptr %p0, align 2
   ret void
 }
 
-define dso_local void @be_i32_to_i16_order(i32 %x, i16* %p0) {
+define dso_local void @be_i32_to_i16_order(i32 %x, ptr %p0) {
 ; CHECK-LABEL: be_i32_to_i16_order:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ror w8, w0, #16
@@ -75,13 +75,13 @@ define dso_local void @be_i32_to_i16_order(i32 %x, i16* %p0) {
   %sh1 = lshr i32 %x, 16
   %t0 = trunc i32 %x to i16
   %t1 = trunc i32 %sh1 to i16
-  %p1 = getelementptr inbounds i16, i16* %p0, i64 1
-  store i16 %t1, i16* %p0, align 2
-  store i16 %t0, i16* %p1, align 2
+  %p1 = getelementptr inbounds i16, ptr %p0, i64 1
+  store i16 %t1, ptr %p0, align 2
+  store i16 %t0, ptr %p1, align 2
   ret void
 }
 
-define dso_local void @trunc_i64_to_i8(i64 %x, i8* %p) {
+define dso_local void @trunc_i64_to_i8(i64 %x, ptr %p) {
 ; CHECK-LABEL: trunc_i64_to_i8:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    str x0, [x1]
@@ -101,25 +101,25 @@ define dso_local void @trunc_i64_to_i8(i64 %x, i8* %p) {
   %t7 = trunc i64 %sh6 to i8
   %sh7 = lshr i64 %x, 56
   %t8 = trunc i64 %sh7 to i8
-  store i8 %t1, i8* %p, align 1
-  %p1 = getelementptr inbounds i8, i8* %p, i64 1
-  store i8 %t2, i8* %p1, align 1
-  %p2 = getelementptr inbounds i8, i8* %p, i64 2
-  store i8 %t3, i8* %p2, align 1
-  %p3 = getelementptr inbounds i8, i8* %p, i64 3
-  store i8 %t4, i8* %p3, align 1
-  %p4 = getelementptr inbounds i8, i8* %p, i64 4
-  store i8 %t5, i8* %p4, align 1
-  %p5 = getelementptr inbounds i8, i8* %p, i64 5
-  store i8 %t6, i8* %p5, align 1
-  %p6 = getelementptr inbounds i8, i8* %p, i64 6
-  store i8 %t7, i8* %p6, align 1
-  %p7 = getelementptr inbounds i8, i8* %p, i64 7
-  store i8 %t8, i8* %p7, align 1
+  store i8 %t1, ptr %p, align 1
+  %p1 = getelementptr inbounds i8, ptr %p, i64 1
+  store i8 %t2, ptr %p1, align 1
+  %p2 = getelementptr inbounds i8, ptr %p, i64 2
+  store i8 %t3, ptr %p2, align 1
+  %p3 = getelementptr inbounds i8, ptr %p, i64 3
+  store i8 %t4, ptr %p3, align 1
+  %p4 = getelementptr inbounds i8, ptr %p, i64 4
+  store i8 %t5, ptr %p4, align 1
+  %p5 = getelementptr inbounds i8, ptr %p, i64 5
+  store i8 %t6, ptr %p5, align 1
+  %p6 = getelementptr inbounds i8, ptr %p, i64 6
+  store i8 %t7, ptr %p6, align 1
+  %p7 = getelementptr inbounds i8, ptr %p, i64 7
+  store i8 %t8, ptr %p7, align 1
   ret void
 }
 
-define dso_local void @trunc_i64_to_i16(i64 %x, i16* %p) {
+define dso_local void @trunc_i64_to_i16(i64 %x, ptr %p) {
 ; CHECK-LABEL: trunc_i64_to_i16:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    str x0, [x1]
@@ -131,17 +131,17 @@ define dso_local void @trunc_i64_to_i16(i64 %x, i16* %p) {
   %t3 = trunc i64 %sh2 to i16
   %sh3 = lshr i64 %x, 48
   %t4 = trunc i64 %sh3 to i16
-  store i16 %t1, i16* %p, align 2
-  %p1 = getelementptr inbounds i16, i16* %p, i64 1
-  store i16 %t2, i16* %p1, align 2
-  %p2 = getelementptr inbounds i16, i16* %p, i64 2
-  store i16 %t3, i16* %p2, align 2
-  %p3 = getelementptr inbounds i16, i16* %p, i64 3
-  store i16 %t4, i16* %p3, align 2
+  store i16 %t1, ptr %p, align 2
+  %p1 = getelementptr inbounds i16, ptr %p, i64 1
+  store i16 %t2, ptr %p1, align 2
+  %p2 = getelementptr inbounds i16, ptr %p, i64 2
+  store i16 %t3, ptr %p2, align 2
+  %p3 = getelementptr inbounds i16, ptr %p, i64 3
+  store i16 %t4, ptr %p3, align 2
   ret void
 }
 
-define dso_local void @trunc_i64_to_i32(i64 %x, i32* %p) {
+define dso_local void @trunc_i64_to_i32(i64 %x, ptr %p) {
 ; CHECK-LABEL: trunc_i64_to_i32:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    str x0, [x1]
@@ -149,12 +149,12 @@ define dso_local void @trunc_i64_to_i32(i64 %x, i32* %p) {
   %t1 = trunc i64 %x to i32
   %sh = lshr i64 %x, 32
   %t2 = trunc i64 %sh to i32
-  store i32 %t1, i32* %p, align 4
-  %p1 = getelementptr inbounds i32, i32* %p, i64 1
-  store i32 %t2, i32* %p1, align 4
+  store i32 %t1, ptr %p, align 4
+  %p1 = getelementptr inbounds i32, ptr %p, i64 1
+  store i32 %t2, ptr %p1, align 4
   ret void
 }
-define dso_local void @be_i64_to_i32(i64 %x, i32* %p0) {
+define dso_local void @be_i64_to_i32(i64 %x, ptr %p0) {
 ; CHECK-LABEL: be_i64_to_i32:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ror x8, x0, #32
@@ -163,13 +163,13 @@ define dso_local void @be_i64_to_i32(i64 %x, i32* %p0) {
   %sh1 = lshr i64 %x, 32
   %t0 = trunc i64 %x to i32
   %t1 = trunc i64 %sh1 to i32
-  %p1 = getelementptr inbounds i32, i32* %p0, i64 1
-  store i32 %t0, i32* %p1, align 4
-  store i32 %t1, i32* %p0, align 4
+  %p1 = getelementptr inbounds i32, ptr %p0, i64 1
+  store i32 %t0, ptr %p1, align 4
+  store i32 %t1, ptr %p0, align 4
   ret void
 }
 
-define dso_local void @be_i64_to_i32_order(i64 %x, i32* %p0) {
+define dso_local void @be_i64_to_i32_order(i64 %x, ptr %p0) {
 ; CHECK-LABEL: be_i64_to_i32_order:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ror x8, x0, #32
@@ -178,84 +178,80 @@ define dso_local void @be_i64_to_i32_order(i64 %x, i32* %p0) {
   %sh1 = lshr i64 %x, 32
   %t0 = trunc i64 %x to i32
   %t1 = trunc i64 %sh1 to i32
-  %p1 = getelementptr inbounds i32, i32* %p0, i64 1
-  store i32 %t1, i32* %p0, align 4
-  store i32 %t0, i32* %p1, align 4
+  %p1 = getelementptr inbounds i32, ptr %p0, i64 1
+  store i32 %t1, ptr %p0, align 4
+  store i32 %t0, ptr %p1, align 4
   ret void
 }
 
 ; Negative tests.
 
-define void @merge_hole(i32 %x, i8* %p) {
+define void @merge_hole(i32 %x, ptr %p) {
 ; CHECK-LABEL: merge_hole:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsr w8, w0, #16
 ; CHECK-NEXT:    strb w0, [x1]
 ; CHECK-NEXT:    strh w8, [x1, #2]
 ; CHECK-NEXT:    ret
-  %pcast = bitcast i8* %p to i16*
-  %p2 = getelementptr inbounds i16, i16* %pcast, i64 1
+  %p2 = getelementptr inbounds i16, ptr %p, i64 1
   %x3 = trunc i32 %x to i8
-  store i8 %x3, i8* %p, align 1
+  store i8 %x3, ptr %p, align 1
   %sh = lshr i32 %x, 16
   %x01 = trunc i32 %sh to i16
-  store i16 %x01, i16* %p2, align 1
+  store i16 %x01, ptr %p2, align 1
   ret void
 }
 
-define void @merge_hole2(i32 %x, i8* %p) {
+define void @merge_hole2(i32 %x, ptr %p) {
 ; CHECK-LABEL: merge_hole2:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsr w8, w0, #16
 ; CHECK-NEXT:    strb w0, [x1]
 ; CHECK-NEXT:    strh w8, [x1, #2]
 ; CHECK-NEXT:    ret
-  %pcast = bitcast i8* %p to i16*
-  %p2 = getelementptr inbounds i16, i16* %pcast, i64 1
+  %p2 = getelementptr inbounds i16, ptr %p, i64 1
   %sh = lshr i32 %x, 16
   %x01 = trunc i32 %sh to i16
-  store i16 %x01, i16* %p2, align 1
+  store i16 %x01, ptr %p2, align 1
   %x3 = trunc i32 %x to i8
-  store i8 %x3, i8* %p, align 1
+  store i8 %x3, ptr %p, align 1
   ret void
 }
 
-define void @merge_hole3(i32 %x, i8* %p) {
+define void @merge_hole3(i32 %x, ptr %p) {
 ; CHECK-LABEL: merge_hole3:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsr w8, w0, #16
 ; CHECK-NEXT:    strb w0, [x1, #1]
 ; CHECK-NEXT:    strh w8, [x1, #2]
 ; CHECK-NEXT:    ret
-  %p1 = getelementptr inbounds i8, i8* %p, i64 1
-  %pcast = bitcast i8* %p to i16*
-  %p2 = getelementptr inbounds i16, i16* %pcast, i64 1
+  %p1 = getelementptr inbounds i8, ptr %p, i64 1
+  %p2 = getelementptr inbounds i16, ptr %p, i64 1
   %x3 = trunc i32 %x to i8
-  store i8 %x3, i8* %p1, align 1
+  store i8 %x3, ptr %p1, align 1
   %sh = lshr i32 %x, 16
   %x01 = trunc i32 %sh to i16
-  store i16 %x01, i16* %p2, align 1
+  store i16 %x01, ptr %p2, align 1
   ret void
 }
 
-define void @merge_hole4(i32 %x, i8* %p) {
+define void @merge_hole4(i32 %x, ptr %p) {
 ; CHECK-LABEL: merge_hole4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsr w8, w0, #16
 ; CHECK-NEXT:    strb w0, [x1, #2]
 ; CHECK-NEXT:    strh w8, [x1]
 ; CHECK-NEXT:    ret
-  %pcast = bitcast i8* %p to i16*
-  %p2 = getelementptr inbounds i8, i8* %p, i64 2
+  %p2 = getelementptr inbounds i8, ptr %p, i64 2
   %x3 = trunc i32 %x to i8
-  store i8 %x3, i8* %p2, align 1
+  store i8 %x3, ptr %p2, align 1
   %sh = lshr i32 %x, 16
   %x01 = trunc i32 %sh to i16
-  store i16 %x01, i16* %pcast, align 1
+  store i16 %x01, ptr %p, align 1
   ret void
 }
 
-define dso_local i32 @load_between_stores(i32 %x, i16* %p, i32 *%ptr) {
+define dso_local i32 @load_between_stores(i32 %x, ptr %p, ptr %ptr) {
 ; CHECK-LABEL: load_between_stores:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    strh w0, [x1]
@@ -267,14 +263,14 @@ define dso_local i32 @load_between_stores(i32 %x, i16* %p, i32 *%ptr) {
   %t1 = trunc i32 %x to i16
   %sh = lshr i32 %x, 16
   %t2 = trunc i32 %sh to i16
-  store i16 %t1, i16* %p, align 2
-  %ld = load i32, i32 *%ptr
-  %p1 = getelementptr inbounds i16, i16* %p, i64 1
-  store i16 %t2, i16* %p1, align 2
+  store i16 %t1, ptr %p, align 2
+  %ld = load i32, ptr %ptr
+  %p1 = getelementptr inbounds i16, ptr %p, i64 1
+  store i16 %t2, ptr %p1, align 2
   ret i32 %ld
 }
 
-define dso_local void @invalid_shift(i16 %x, i8* %p) {
+define dso_local void @invalid_shift(i16 %x, ptr %p) {
 ; CHECK-LABEL: invalid_shift:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ubfx w8, w0, #4, #12
@@ -284,13 +280,13 @@ define dso_local void @invalid_shift(i16 %x, i8* %p) {
   %t1 = trunc i16 %x to i8
   %sh = lshr i16 %x, 4
   %t2 = trunc i16 %sh to i8
-  store i8 %t1, i8* %p, align 1
-  %p1 = getelementptr inbounds i8, i8* %p, i64 1
-  store i8 %t2, i8* %p1, align 1
+  store i8 %t1, ptr %p, align 1
+  %p1 = getelementptr inbounds i8, ptr %p, i64 1
+  store i8 %t2, ptr %p1, align 1
   ret void
 }
 
-define dso_local void @missing_store(i32 %x, i8* %p) {
+define dso_local void @missing_store(i32 %x, ptr %p) {
 ; CHECK-LABEL: missing_store:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsr w8, w0, #8
@@ -304,15 +300,15 @@ define dso_local void @missing_store(i32 %x, i8* %p) {
   %t2 = trunc i32 %sh1 to i8
   %sh3 = lshr i32 %x, 24
   %t4 = trunc i32 %sh3 to i8
-  store i8 %t1, i8* %p, align 1
-  %p1 = getelementptr inbounds i8, i8* %p, i64 1
-  store i8 %t2, i8* %p1, align 1
-  %p3 = getelementptr inbounds i8, i8* %p, i64 3
-  store i8 %t4, i8* %p3, align 1
+  store i8 %t1, ptr %p, align 1
+  %p1 = getelementptr inbounds i8, ptr %p, i64 1
+  store i8 %t2, ptr %p1, align 1
+  %p3 = getelementptr inbounds i8, ptr %p, i64 3
+  store i8 %t4, ptr %p3, align 1
   ret void
 }
 
-define dso_local void @different_base_reg(i16 %x, i8* %p, i8 *%p2) {
+define dso_local void @different_base_reg(i16 %x, ptr %p, ptr %p2) {
 ; CHECK-LABEL: different_base_reg:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ubfx w8, w0, #8, #8
@@ -322,13 +318,13 @@ define dso_local void @different_base_reg(i16 %x, i8* %p, i8 *%p2) {
   %t1 = trunc i16 %x to i8
   %sh = lshr i16 %x, 8
   %t2 = trunc i16 %sh to i8
-  store i8 %t1, i8* %p, align 1
-  %p1 = getelementptr inbounds i8, i8* %p2, i64 1
-  store i8 %t2, i8* %p1, align 1
+  store i8 %t1, ptr %p, align 1
+  %p1 = getelementptr inbounds i8, ptr %p2, i64 1
+  store i8 %t2, ptr %p1, align 1
   ret void
 }
 
-define dso_local void @second_store_is_volatile(i16 %x, i8* %p) {
+define dso_local void @second_store_is_volatile(i16 %x, ptr %p) {
 ; CHECK-LABEL: second_store_is_volatile:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ubfx w8, w0, #8, #8
@@ -338,8 +334,8 @@ define dso_local void @second_store_is_volatile(i16 %x, i8* %p) {
   %t1 = trunc i16 %x to i8
   %sh = lshr i16 %x, 8
   %t2 = trunc i16 %sh to i8
-  store volatile i8 %t1, i8* %p, align 1
-  %p1 = getelementptr inbounds i8, i8* %p, i64 1
-  store i8 %t2, i8* %p1, align 1
+  store volatile i8 %t1, ptr %p, align 1
+  %p1 = getelementptr inbounds i8, ptr %p, i64 1
+  store i8 %t2, ptr %p1, align 1
   ret void
 }

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/no-neon-no-fp.ll b/llvm/test/CodeGen/AArch64/GlobalISel/no-neon-no-fp.ll
index 822c6252fbe3..324451bb1461 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/no-neon-no-fp.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/no-neon-no-fp.ll
@@ -4,9 +4,9 @@ target triple = "aarch64-unknown-unknown"
 
 ; We should fall back in the translator if we don't have neon/fp support.
 ; CHECK: Instruction selection used fallback path for foo
-define void @foo(i128 *%ptr) #0 align 2 {
+define void @foo(ptr %ptr) #0 align 2 {
 entry:
-  store i128 0, i128* %ptr, align 16
+  store i128 0, ptr %ptr, align 16
   ret void
 }
 

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/ret-1x-vec.ll b/llvm/test/CodeGen/AArch64/GlobalISel/ret-1x-vec.ll
index 3163d5dd2ea6..8bf741b5e6c8 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/ret-1x-vec.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/ret-1x-vec.ll
@@ -14,22 +14,22 @@ define <1 x float> @ret_v1f32(<1 x float> %v) {
   ret <1 x float> %v
 }
 
-define <1 x i8*> @ret_v1p0(<1 x i8*> %v) {
+define <1 x ptr> @ret_v1p0(<1 x ptr> %v) {
   ; CHECK-LABEL: name: ret_v1p0
   ; CHECK: bb.1 (%ir-block.0):
   ; CHECK:   liveins: $d0
   ; CHECK:   [[COPY:%[0-9]+]]:_(p0) = COPY $d0
   ; CHECK:   $d0 = COPY [[COPY]](p0)
   ; CHECK:   RET_ReallyLR implicit $d0
-  ret <1 x i8*> %v
+  ret <1 x ptr> %v
 }
 
-define <1 x i8 addrspace(1)*> @ret_v1p1(<1 x i8 addrspace(1)*> %v) {
+define <1 x ptr addrspace(1)> @ret_v1p1(<1 x ptr addrspace(1)> %v) {
   ; CHECK-LABEL: name: ret_v1p1
   ; CHECK: bb.1 (%ir-block.0):
   ; CHECK:   liveins: $d0
   ; CHECK:   [[COPY:%[0-9]+]]:_(p1) = COPY $d0
   ; CHECK:   $d0 = COPY [[COPY]](p1)
   ; CHECK:   RET_ReallyLR implicit $d0
-  ret <1 x i8 addrspace(1)*> %v
+  ret <1 x ptr addrspace(1)> %v
 }

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/ret-vec-promote.ll b/llvm/test/CodeGen/AArch64/GlobalISel/ret-vec-promote.ll
index 28bca50a7de8..c8fe31f54d88 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/ret-vec-promote.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/ret-vec-promote.ll
@@ -2,7 +2,7 @@
 ; RUN: llc -mtriple=aarch64-linux-gnu -O0 -global-isel -stop-after=irtranslator -o - %s | FileCheck %s
 
 ; Tests that vectors of i1 types can be appropriately extended first before return handles it.
-define <4 x i1> @ret_v4i1(<4 x i1> *%v) {
+define <4 x i1> @ret_v4i1(ptr %v) {
   ; CHECK-LABEL: name: ret_v4i1
   ; CHECK: bb.1 (%ir-block.0):
   ; CHECK:   liveins: $x0
@@ -11,6 +11,6 @@ define <4 x i1> @ret_v4i1(<4 x i1> *%v) {
   ; CHECK:   [[ANYEXT:%[0-9]+]]:_(<4 x s16>) = G_ANYEXT [[LOAD]](<4 x s1>)
   ; CHECK:   $d0 = COPY [[ANYEXT]](<4 x s16>)
   ; CHECK:   RET_ReallyLR implicit $d0
-  %v2 = load <4 x i1>, <4 x i1> *%v
+  %v2 = load <4 x i1>, ptr %v
   ret <4 x i1> %v2
 }

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-bitfield-insert.ll b/llvm/test/CodeGen/AArch64/GlobalISel/select-bitfield-insert.ll
index d7ba81a89d01..b8e236448596 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-bitfield-insert.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-bitfield-insert.ll
@@ -109,7 +109,7 @@ bb:
   ret i64 %out
 }
 
-define i64 @extra_use1(i64 %in1, i64 %in2, i64* %p) {
+define i64 @extra_use1(i64 %in1, i64 %in2, ptr %p) {
 ; GISEL-LABEL: extra_use1:
 ; GISEL:       ; %bb.0: ; %bb
 ; GISEL-NEXT:    lsl x8, x0, #1
@@ -129,11 +129,11 @@ bb:
   %tmp3 = shl i64 %in1, 1
   %tmp4 = and i64 %in2, 1
   %out = or i64 %tmp3, %tmp4
-  store i64 %tmp3, i64* %p
+  store i64 %tmp3, ptr %p
   ret i64 %out
 }
 
-define i64 @extra_use2(i64 %in1, i64 %in2, i64* %p) {
+define i64 @extra_use2(i64 %in1, i64 %in2, ptr %p) {
 ; GISEL-LABEL: extra_use2:
 ; GISEL:       ; %bb.0: ; %bb
 ; GISEL-NEXT:    and x8, x1, #0x1
@@ -152,6 +152,6 @@ bb:
   %tmp3 = shl i64 %in1, 1
   %tmp4 = and i64 %in2, 1
   %out = or i64 %tmp3, %tmp4
-  store i64 %tmp4, i64* %p
+  store i64 %tmp4, ptr %p
   ret i64 %out
 }

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-frameaddr.ll b/llvm/test/CodeGen/AArch64/GlobalISel/select-frameaddr.ll
index 83bea900bb64..e825ea73721e 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-frameaddr.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-frameaddr.ll
@@ -1,20 +1,20 @@
 ; RUN: llc -mtriple=arm64-apple-ios -global-isel -o - %s | FileCheck %s
 
-define i8* @rt0(i32 %x) nounwind readnone {
+define ptr @rt0(i32 %x) nounwind readnone {
 entry:
 ; CHECK-LABEL: rt0:
 ; CHECK: mov x0, x29
-  %0 = tail call i8* @llvm.frameaddress(i32 0)
-  ret i8* %0
+  %0 = tail call ptr @llvm.frameaddress(i32 0)
+  ret ptr %0
 }
 
-define i8* @rt2() nounwind readnone {
+define ptr @rt2() nounwind readnone {
 entry:
 ; CHECK-LABEL: rt2:
 ; CHECK: ldr x[[reg:[0-9]+]], [x29]
 ; CHECK: ldr x0, [x[[reg]]]
-  %0 = tail call i8* @llvm.frameaddress(i32 2)
-  ret i8* %0
+  %0 = tail call ptr @llvm.frameaddress(i32 2)
+  ret ptr %0
 }
 
-declare i8* @llvm.frameaddress(i32) nounwind readnone
+declare ptr @llvm.frameaddress(i32) nounwind readnone

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-returnaddr.ll b/llvm/test/CodeGen/AArch64/GlobalISel/select-returnaddr.ll
index bced5554cddf..2e17a72f7bd9 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-returnaddr.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-returnaddr.ll
@@ -1,15 +1,15 @@
 ; RUN: llc -mtriple=arm64-apple-ios -global-isel -o - %s | FileCheck %s
 
-define i8* @rt0(i32 %x) nounwind readnone {
+define ptr @rt0(i32 %x) nounwind readnone {
 entry:
 ; CHECK-LABEL: rt0:
 ; CHECK:       hint #7
 ; CHECK-NEXT:  mov x0, x30
-  %0 = tail call i8* @llvm.returnaddress(i32 0)
-  ret i8* %0
+  %0 = tail call ptr @llvm.returnaddress(i32 0)
+  ret ptr %0
 }
 
-define i8* @rt0_call_clobber(i32 %x) nounwind readnone {
+define ptr @rt0_call_clobber(i32 %x) nounwind readnone {
 entry:
 ; CHECK-LABEL: rt0_call_clobber:
 ; CHECK:       stp x20, x19, [sp, #-32]!
@@ -22,11 +22,11 @@ entry:
 ; CHECK-NOT:   x0
 ; CHECK:       ret
   %ret = call i32 @foo()
-  %0 = tail call i8* @llvm.returnaddress(i32 0)
-  ret i8* %0
+  %0 = tail call ptr @llvm.returnaddress(i32 0)
+  ret ptr %0
 }
 
-define i8* @rt2() nounwind readnone {
+define ptr @rt2() nounwind readnone {
 entry:
 ; CHECK-LABEL: rt2:
 ; CHECK:       ldr x[[reg:[0-9]+]], [x29]
@@ -36,10 +36,10 @@ entry:
 ; CHECK:       mov x0, x30
 ; CHECK-NOT:   x0
 ; CHECK:       ret
-  %0 = tail call i8* @llvm.returnaddress(i32 2)
-  ret i8* %0
+  %0 = tail call ptr @llvm.returnaddress(i32 2)
+  ret ptr %0
 }
 
 
 declare i32 @foo()
-declare i8* @llvm.returnaddress(i32) nounwind readnone
+declare ptr @llvm.returnaddress(i32) nounwind readnone

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/store-merging.ll b/llvm/test/CodeGen/AArch64/GlobalISel/store-merging.ll
index e7382204ceae..0f525248b068 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/store-merging.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/store-merging.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=aarch64-apple-ios -global-isel -global-isel-abort=1 - < %s | FileCheck %s
 
-define void @test_simple_2xs8(i8 *%ptr) {
+define void @test_simple_2xs8(ptr %ptr) {
 ; CHECK-LABEL: test_simple_2xs8:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    mov w8, #4
@@ -9,28 +9,26 @@ define void @test_simple_2xs8(i8 *%ptr) {
 ; CHECK-NEXT:    strb w8, [x0]
 ; CHECK-NEXT:    strb w9, [x0, #1]
 ; CHECK-NEXT:    ret
-  %addr1 = getelementptr i8, i8 *%ptr, i64 0
-  store i8 4, i8 *%addr1
-  %addr2 = getelementptr i8, i8 *%ptr, i64 1
-  store i8 5, i8 *%addr2
+  store i8 4, ptr %ptr
+  %addr2 = getelementptr i8, ptr %ptr, i64 1
+  store i8 5, ptr %addr2
   ret void
 }
 
-define void @test_simple_2xs16(i16 *%ptr) {
+define void @test_simple_2xs16(ptr %ptr) {
 ; CHECK-LABEL: test_simple_2xs16:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    mov w8, #4
 ; CHECK-NEXT:    movk w8, #5, lsl #16
 ; CHECK-NEXT:    str w8, [x0]
 ; CHECK-NEXT:    ret
-  %addr1 = getelementptr i16, i16 *%ptr, i64 0
-  store i16 4, i16 *%addr1
-  %addr2 = getelementptr i16, i16 *%ptr, i64 1
-  store i16 5, i16 *%addr2
+  store i16 4, ptr %ptr
+  %addr2 = getelementptr i16, ptr %ptr, i64 1
+  store i16 5, ptr %addr2
   ret void
 }
 
-define void @test_simple_4xs16(i16 *%ptr) {
+define void @test_simple_4xs16(ptr %ptr) {
 ; CHECK-LABEL: test_simple_4xs16:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    mov x8, #4
@@ -39,47 +37,44 @@ define void @test_simple_4xs16(i16 *%ptr) {
 ; CHECK-NEXT:    movk x8, #14, lsl #48
 ; CHECK-NEXT:    str x8, [x0]
 ; CHECK-NEXT:    ret
-  %addr1 = getelementptr i16, i16 *%ptr, i64 0
-  store i16 4, i16 *%addr1
-  %addr2 = getelementptr i16, i16 *%ptr, i64 1
-  store i16 5, i16 *%addr2
-  %addr3 = getelementptr i16, i16 *%ptr, i64 2
-  store i16 9, i16 *%addr3
-  %addr4 = getelementptr i16, i16 *%ptr, i64 3
-  store i16 14, i16 *%addr4
+  store i16 4, ptr %ptr
+  %addr2 = getelementptr i16, ptr %ptr, i64 1
+  store i16 5, ptr %addr2
+  %addr3 = getelementptr i16, ptr %ptr, i64 2
+  store i16 9, ptr %addr3
+  %addr4 = getelementptr i16, ptr %ptr, i64 3
+  store i16 14, ptr %addr4
   ret void
 }
 
-define void @test_simple_2xs32(i32 *%ptr) {
+define void @test_simple_2xs32(ptr %ptr) {
 ; CHECK-LABEL: test_simple_2xs32:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    mov x8, #4
 ; CHECK-NEXT:    movk x8, #5, lsl #32
 ; CHECK-NEXT:    str x8, [x0]
 ; CHECK-NEXT:    ret
-  %addr1 = getelementptr i32, i32 *%ptr, i64 0
-  store i32 4, i32 *%addr1
-  %addr2 = getelementptr i32, i32 *%ptr, i64 1
-  store i32 5, i32 *%addr2
+  store i32 4, ptr %ptr
+  %addr2 = getelementptr i32, ptr %ptr, i64 1
+  store i32 5, ptr %addr2
   ret void
 }
 
-define void @test_simple_2xs64_illegal(i64 *%ptr) {
+define void @test_simple_2xs64_illegal(ptr %ptr) {
 ; CHECK-LABEL: test_simple_2xs64_illegal:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    mov w8, #4
 ; CHECK-NEXT:    mov w9, #5
 ; CHECK-NEXT:    stp x8, x9, [x0]
 ; CHECK-NEXT:    ret
-  %addr1 = getelementptr i64, i64 *%ptr, i64 0
-  store i64 4, i64 *%addr1
-  %addr2 = getelementptr i64, i64 *%ptr, i64 1
-  store i64 5, i64 *%addr2
+  store i64 4, ptr %ptr
+  %addr2 = getelementptr i64, ptr %ptr, i64 1
+  store i64 5, ptr %addr2
   ret void
 }
 
 ; Don't merge vectors...yet.
-define void @test_simple_vector(<2 x i16> *%ptr) {
+define void @test_simple_vector(ptr %ptr) {
 ; CHECK-LABEL: test_simple_vector:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    mov w8, #4
@@ -91,14 +86,13 @@ define void @test_simple_vector(<2 x i16> *%ptr) {
 ; CHECK-NEXT:    strh w10, [x0, #4]
 ; CHECK-NEXT:    strh w11, [x0, #6]
 ; CHECK-NEXT:    ret
-  %addr1 = getelementptr <2 x i16>, <2 x i16> *%ptr, i64 0
-  store <2 x i16> <i16 4, i16 7>, <2 x i16> *%addr1
-  %addr2 = getelementptr <2 x i16>, <2 x i16> *%ptr, i64 1
-  store <2 x i16> <i16 5, i16 8>, <2 x i16> *%addr2
+  store <2 x i16> <i16 4, i16 7>, ptr %ptr
+  %addr2 = getelementptr <2 x i16>, ptr %ptr, i64 1
+  store <2 x i16> <i16 5, i16 8>, ptr %addr2
   ret void
 }
 
-define i32 @test_unknown_alias(i32 *%ptr, i32 *%aliasptr) {
+define i32 @test_unknown_alias(ptr %ptr, ptr %aliasptr) {
 ; CHECK-LABEL: test_unknown_alias:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    mov w9, #4
@@ -108,15 +102,14 @@ define i32 @test_unknown_alias(i32 *%ptr, i32 *%aliasptr) {
 ; CHECK-NEXT:    ldr w0, [x1]
 ; CHECK-NEXT:    str w9, [x8, #4]
 ; CHECK-NEXT:    ret
-  %addr1 = getelementptr i32, i32 *%ptr, i64 0
-  store i32 4, i32 *%addr1
-  %ld = load i32, i32 *%aliasptr
-  %addr2 = getelementptr i32, i32 *%ptr, i64 1
-  store i32 5, i32 *%addr2
+  store i32 4, ptr %ptr
+  %ld = load i32, ptr %aliasptr
+  %addr2 = getelementptr i32, ptr %ptr, i64 1
+  store i32 5, ptr %addr2
   ret i32 %ld
 }
 
-define void @test_2x_2xs32(i32 *%ptr, i32 *%ptr2) {
+define void @test_2x_2xs32(ptr %ptr, ptr %ptr2) {
 ; CHECK-LABEL: test_2x_2xs32:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    mov x10, #9
@@ -126,60 +119,55 @@ define void @test_2x_2xs32(i32 *%ptr, i32 *%ptr2) {
 ; CHECK-NEXT:    stp w8, w9, [x0]
 ; CHECK-NEXT:    str x10, [x1]
 ; CHECK-NEXT:    ret
-  %addr1 = getelementptr i32, i32 *%ptr, i64 0
-  store i32 4, i32 *%addr1
-  %addr2 = getelementptr i32, i32 *%ptr, i64 1
-  store i32 5, i32 *%addr2
+  store i32 4, ptr %ptr
+  %addr2 = getelementptr i32, ptr %ptr, i64 1
+  store i32 5, ptr %addr2
 
-  %addr3 = getelementptr i32, i32 *%ptr2, i64 0
-  store i32 9, i32 *%addr3
-  %addr4 = getelementptr i32, i32 *%ptr2, i64 1
-  store i32 17, i32 *%addr4
+  store i32 9, ptr %ptr2
+  %addr4 = getelementptr i32, ptr %ptr2, i64 1
+  store i32 17, ptr %addr4
   ret void
 }
 
-define void @test_simple_var_2xs8(i8 *%ptr, i8 %v1, i8 %v2) {
+define void @test_simple_var_2xs8(ptr %ptr, i8 %v1, i8 %v2) {
 ; CHECK-LABEL: test_simple_var_2xs8:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    strb w1, [x0]
 ; CHECK-NEXT:    strb w2, [x0, #1]
 ; CHECK-NEXT:    ret
-  %addr1 = getelementptr i8, i8 *%ptr, i64 0
-  store i8 %v1, i8 *%addr1
-  %addr2 = getelementptr i8, i8 *%ptr, i64 1
-  store i8 %v2, i8 *%addr2
+  store i8 %v1, ptr %ptr
+  %addr2 = getelementptr i8, ptr %ptr, i64 1
+  store i8 %v2, ptr %addr2
   ret void
 }
 
-define void @test_simple_var_2xs16(i16 *%ptr, i16 %v1, i16 %v2) {
+define void @test_simple_var_2xs16(ptr %ptr, i16 %v1, i16 %v2) {
 ; CHECK-LABEL: test_simple_var_2xs16:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    strh w1, [x0]
 ; CHECK-NEXT:    strh w2, [x0, #2]
 ; CHECK-NEXT:    ret
-  %addr1 = getelementptr i16, i16 *%ptr, i64 0
-  store i16 %v1, i16 *%addr1
-  %addr2 = getelementptr i16, i16 *%ptr, i64 1
-  store i16 %v2, i16 *%addr2
+  store i16 %v1, ptr %ptr
+  %addr2 = getelementptr i16, ptr %ptr, i64 1
+  store i16 %v2, ptr %addr2
   ret void
 }
 
-define void @test_simple_var_2xs32(i32 *%ptr, i32 %v1, i32 %v2) {
+define void @test_simple_var_2xs32(ptr %ptr, i32 %v1, i32 %v2) {
 ; CHECK-LABEL: test_simple_var_2xs32:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    stp w1, w2, [x0]
 ; CHECK-NEXT:    ret
-  %addr1 = getelementptr i32, i32 *%ptr, i64 0
-  store i32 %v1, i32 *%addr1
-  %addr2 = getelementptr i32, i32 *%ptr, i64 1
-  store i32 %v2, i32 *%addr2
+  store i32 %v1, ptr %ptr
+  %addr2 = getelementptr i32, ptr %ptr, i64 1
+  store i32 %v2, ptr %addr2
   ret void
 }
 
 
 ; The store to ptr2 prevents merging into a single store.
 ; We can still merge the stores into addr1 and addr2.
-define void @test_alias_4xs16(i16 *%ptr, i16 *%ptr2) {
+define void @test_alias_4xs16(ptr %ptr, ptr %ptr2) {
 ; CHECK-LABEL: test_alias_4xs16:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    mov w8, #4
@@ -191,20 +179,19 @@ define void @test_alias_4xs16(i16 *%ptr, i16 *%ptr2) {
 ; CHECK-NEXT:    strh wzr, [x1]
 ; CHECK-NEXT:    strh w10, [x0, #6]
 ; CHECK-NEXT:    ret
-  %addr1 = getelementptr i16, i16 *%ptr, i64 0
-  store i16 4, i16 *%addr1
-  %addr2 = getelementptr i16, i16 *%ptr, i64 1
-  store i16 5, i16 *%addr2
-  %addr3 = getelementptr i16, i16 *%ptr, i64 2
-  store i16 9, i16 *%addr3
-  store i16 0, i16 *%ptr2
-  %addr4 = getelementptr i16, i16 *%ptr, i64 3
-  store i16 14, i16 *%addr4
+  store i16 4, ptr %ptr
+  %addr2 = getelementptr i16, ptr %ptr, i64 1
+  store i16 5, ptr %addr2
+  %addr3 = getelementptr i16, ptr %ptr, i64 2
+  store i16 9, ptr %addr3
+  store i16 0, ptr %ptr2
+  %addr4 = getelementptr i16, ptr %ptr, i64 3
+  store i16 14, ptr %addr4
   ret void
 }
 
 ; Here store of 5 and 9 can be merged, others have aliasing barriers.
-define void @test_alias2_4xs16(i16 *%ptr, i16 *%ptr2, i16* %ptr3) {
+define void @test_alias2_4xs16(ptr %ptr, ptr %ptr2, ptr %ptr3) {
 ; CHECK-LABEL: test_alias2_4xs16:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    mov w8, #4
@@ -217,21 +204,20 @@ define void @test_alias2_4xs16(i16 *%ptr, i16 *%ptr2, i16* %ptr3) {
 ; CHECK-NEXT:    strh wzr, [x1]
 ; CHECK-NEXT:    strh w8, [x0, #6]
 ; CHECK-NEXT:    ret
-  %addr1 = getelementptr i16, i16 *%ptr, i64 0
-  store i16 4, i16 *%addr1
-  %addr2 = getelementptr i16, i16 *%ptr, i64 1
-  store i16 0, i16 *%ptr3
-  store i16 5, i16 *%addr2
-  %addr3 = getelementptr i16, i16 *%ptr, i64 2
-  store i16 9, i16 *%addr3
-  store i16 0, i16 *%ptr2
-  %addr4 = getelementptr i16, i16 *%ptr, i64 3
-  store i16 14, i16 *%addr4
+  store i16 4, ptr %ptr
+  %addr2 = getelementptr i16, ptr %ptr, i64 1
+  store i16 0, ptr %ptr3
+  store i16 5, ptr %addr2
+  %addr3 = getelementptr i16, ptr %ptr, i64 2
+  store i16 9, ptr %addr3
+  store i16 0, ptr %ptr2
+  %addr4 = getelementptr i16, ptr %ptr, i64 3
+  store i16 14, ptr %addr4
   ret void
 }
 
 ; No merging can be done here.
-define void @test_alias3_4xs16(i16 *%ptr, i16 *%ptr2, i16 *%ptr3, i16 *%ptr4) {
+define void @test_alias3_4xs16(ptr %ptr, ptr %ptr2, ptr %ptr3, ptr %ptr4) {
 ; CHECK-LABEL: test_alias3_4xs16:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    mov w8, #4
@@ -246,22 +232,21 @@ define void @test_alias3_4xs16(i16 *%ptr, i16 *%ptr2, i16 *%ptr3, i16 *%ptr4) {
 ; CHECK-NEXT:    strh wzr, [x1]
 ; CHECK-NEXT:    strh w9, [x0, #6]
 ; CHECK-NEXT:    ret
-  %addr1 = getelementptr i16, i16 *%ptr, i64 0
-  store i16 4, i16 *%addr1
-  %addr2 = getelementptr i16, i16 *%ptr, i64 1
-  store i16 0, i16 *%ptr3
-  store i16 5, i16 *%addr2
-  store i16 0, i16 *%ptr4
-  %addr3 = getelementptr i16, i16 *%ptr, i64 2
-  store i16 9, i16 *%addr3
-  store i16 0, i16 *%ptr2
-  %addr4 = getelementptr i16, i16 *%ptr, i64 3
-  store i16 14, i16 *%addr4
+  store i16 4, ptr %ptr
+  %addr2 = getelementptr i16, ptr %ptr, i64 1
+  store i16 0, ptr %ptr3
+  store i16 5, ptr %addr2
+  store i16 0, ptr %ptr4
+  %addr3 = getelementptr i16, ptr %ptr, i64 2
+  store i16 9, ptr %addr3
+  store i16 0, ptr %ptr2
+  %addr4 = getelementptr i16, ptr %ptr, i64 3
+  store i16 14, ptr %addr4
   ret void
 }
 
 ; Can merge because the load is from a different alloca and can't alias.
-define i32 @test_alias_allocas_2xs32(i32 *%ptr) {
+define i32 @test_alias_allocas_2xs32(ptr %ptr) {
 ; CHECK-LABEL: test_alias_allocas_2xs32:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #32
@@ -274,15 +259,14 @@ define i32 @test_alias_allocas_2xs32(i32 *%ptr) {
 ; CHECK-NEXT:    ret
   %a1 = alloca [6 x i32]
   %a2 = alloca i32, align 4
-  %addr1 = getelementptr [6 x i32], [6 x i32] *%a1, i64 0, i32 0
-  store i32 4, i32 *%addr1
-  %ld = load i32, i32 *%a2
-  %addr2 = getelementptr [6 x i32], [6 x i32] *%a1, i64 0, i32 1
-  store i32 5, i32 *%addr2
+  store i32 4, ptr %a1
+  %ld = load i32, ptr %a2
+  %addr2 = getelementptr [6 x i32], ptr %a1, i64 0, i32 1
+  store i32 5, ptr %addr2
   ret i32 %ld
 }
 
-define void @test_volatile(i32 **%ptr) {
+define void @test_volatile(ptr %ptr) {
 ; CHECK-LABEL: test_volatile:
 ; CHECK:       ; %bb.0: ; %entry
 ; CHECK-NEXT:    ldr x8, [x0]
@@ -290,15 +274,14 @@ define void @test_volatile(i32 **%ptr) {
 ; CHECK-NEXT:    str wzr, [x8, #4]
 ; CHECK-NEXT:    ret
 entry:
-  %0 = load i32*, i32** %ptr, align 8
-  store volatile i32 0, i32* %0, align 4;
-  %1 = bitcast i32** %ptr to i8**
-  %add.ptr.i.i38 = getelementptr inbounds i32, i32* %0, i64 1
-  store volatile i32 0, i32* %add.ptr.i.i38, align 4
+  %0 = load ptr, ptr %ptr, align 8
+  store volatile i32 0, ptr %0, align 4;
+  %add.ptr.i.i38 = getelementptr inbounds i32, ptr %0, i64 1
+  store volatile i32 0, ptr %add.ptr.i.i38, align 4
   ret void
 }
 
-define void @test_atomic(i32 **%ptr) {
+define void @test_atomic(ptr %ptr) {
 ; CHECK-LABEL: test_atomic:
 ; CHECK:       ; %bb.0: ; %entry
 ; CHECK-NEXT:    ldr x8, [x0]
@@ -307,10 +290,9 @@ define void @test_atomic(i32 **%ptr) {
 ; CHECK-NEXT:    stlr wzr, [x9]
 ; CHECK-NEXT:    ret
 entry:
-  %0 = load i32*, i32** %ptr, align 8
-  store atomic i32 0, i32* %0 release, align 4;
-  %1 = bitcast i32** %ptr to i8**
-  %add.ptr.i.i38 = getelementptr inbounds i32, i32* %0, i64 1
-  store atomic i32 0, i32* %add.ptr.i.i38 release, align 4
+  %0 = load ptr, ptr %ptr, align 8
+  store atomic i32 0, ptr %0 release, align 4;
+  %add.ptr.i.i38 = getelementptr inbounds i32, ptr %0, i64 1
+  store atomic i32 0, ptr %add.ptr.i.i38 release, align 4
   ret void
 }

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/swifterror.ll b/llvm/test/CodeGen/AArch64/GlobalISel/swifterror.ll
index dce894b10bd4..9dfb2696594b 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/swifterror.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/swifterror.ll
@@ -1,12 +1,12 @@
 ; RUN: llc -verify-machineinstrs -frame-pointer=all -global-isel < %s -mtriple=aarch64-apple-ios | FileCheck %s
 
-declare i8* @malloc(i64)
-declare void @free(i8*)
+declare ptr @malloc(i64)
+declare void @free(ptr)
 %swift_error = type {i64, i8}
 
 ; This tests the basic usage of a swifterror parameter. "foo" is the function
 ; that takes a swifterror parameter and "caller" is the caller of "foo".
-define float @foo(%swift_error** swifterror %error_ptr_ref) {
+define float @foo(ptr swifterror %error_ptr_ref) {
 ; CHECK-LABEL: foo:
 ; CHECK: mov w0, #16
 ; CHECK: malloc
@@ -16,16 +16,15 @@ define float @foo(%swift_error** swifterror %error_ptr_ref) {
 ; CHECK-NOT: x21
 
 entry:
-  %call = call i8* @malloc(i64 16)
-  %call.0 = bitcast i8* %call to %swift_error*
-  store %swift_error* %call.0, %swift_error** %error_ptr_ref
-  %tmp = getelementptr inbounds i8, i8* %call, i64 8
-  store i8 1, i8* %tmp
+  %call = call ptr @malloc(i64 16)
+  store ptr %call, ptr %error_ptr_ref
+  %tmp = getelementptr inbounds i8, ptr %call, i64 8
+  store i8 1, ptr %tmp
   ret float 1.0
 }
 
 ; "caller" calls "foo" that takes a swifterror parameter.
-define float @caller(i8* %error_ref) {
+define float @caller(ptr %error_ref) {
 ; CHECK-LABEL: caller:
 ; CHECK: mov [[ID:x[0-9]+]], x0
 ; CHECK: bl {{.*}}foo
@@ -37,25 +36,24 @@ define float @caller(i8* %error_ref) {
 ; CHECK: bl {{.*}}free
 
 entry:
-  %error_ptr_ref = alloca swifterror %swift_error*
-  store %swift_error* null, %swift_error** %error_ptr_ref
-  %call = call float @foo(%swift_error** swifterror %error_ptr_ref)
-  %error_from_foo = load %swift_error*, %swift_error** %error_ptr_ref
-  %had_error_from_foo = icmp ne %swift_error* %error_from_foo, null
-  %tmp = bitcast %swift_error* %error_from_foo to i8*
+  %error_ptr_ref = alloca swifterror ptr
+  store ptr null, ptr %error_ptr_ref
+  %call = call float @foo(ptr swifterror %error_ptr_ref)
+  %error_from_foo = load ptr, ptr %error_ptr_ref
+  %had_error_from_foo = icmp ne ptr %error_from_foo, null
   br i1 %had_error_from_foo, label %handler, label %cont
 cont:
-  %v1 = getelementptr inbounds %swift_error, %swift_error* %error_from_foo, i64 0, i32 1
-  %t = load i8, i8* %v1
-  store i8 %t, i8* %error_ref
+  %v1 = getelementptr inbounds %swift_error, ptr %error_from_foo, i64 0, i32 1
+  %t = load i8, ptr %v1
+  store i8 %t, ptr %error_ref
   br label %handler
 handler:
-  call void @free(i8* %tmp)
+  call void @free(ptr %error_from_foo)
   ret float 1.0
 }
 
 ; "caller2" is the caller of "foo", it calls "foo" inside a loop.
-define float @caller2(i8* %error_ref) {
+define float @caller2(ptr %error_ref) {
 ; CHECK-LABEL: caller2:
 ; CHECK: mov [[ID:x[0-9]+]], x0
 ; CHECK: fmov [[CMP:s[0-9]+]], #1.0
@@ -71,31 +69,30 @@ define float @caller2(i8* %error_ref) {
 ; CHECK: bl {{.*}}free
 
 entry:
-  %error_ptr_ref = alloca swifterror %swift_error*
+  %error_ptr_ref = alloca swifterror ptr
   br label %bb_loop
 bb_loop:
-  store %swift_error* null, %swift_error** %error_ptr_ref
-  %call = call float @foo(%swift_error** swifterror %error_ptr_ref)
-  %error_from_foo = load %swift_error*, %swift_error** %error_ptr_ref
-  %had_error_from_foo = icmp ne %swift_error* %error_from_foo, null
-  %tmp = bitcast %swift_error* %error_from_foo to i8*
+  store ptr null, ptr %error_ptr_ref
+  %call = call float @foo(ptr swifterror %error_ptr_ref)
+  %error_from_foo = load ptr, ptr %error_ptr_ref
+  %had_error_from_foo = icmp ne ptr %error_from_foo, null
   br i1 %had_error_from_foo, label %handler, label %cont
 cont:
   %cmp = fcmp ogt float %call, 1.000000e+00
   br i1 %cmp, label %bb_end, label %bb_loop
 bb_end:
-  %v1 = getelementptr inbounds %swift_error, %swift_error* %error_from_foo, i64 0, i32 1
-  %t = load i8, i8* %v1
-  store i8 %t, i8* %error_ref
+  %v1 = getelementptr inbounds %swift_error, ptr %error_from_foo, i64 0, i32 1
+  %t = load i8, ptr %v1
+  store i8 %t, ptr %error_ref
   br label %handler
 handler:
-  call void @free(i8* %tmp)
+  call void @free(ptr %error_from_foo)
   ret float 1.0
 }
 
 ; "foo_if" is a function that takes a swifterror parameter, it sets swifterror
 ; under a certain condition.
-define float @foo_if(%swift_error** swifterror %error_ptr_ref, i32 %cc) {
+define float @foo_if(ptr swifterror %error_ptr_ref, i32 %cc) {
 ; CHECK-LABEL: foo_if:
 ; CHECK: cbz w0
 ; CHECK: mov w0, #16
@@ -111,11 +108,10 @@ entry:
   br i1 %cond, label %gen_error, label %normal
 
 gen_error:
-  %call = call i8* @malloc(i64 16)
-  %call.0 = bitcast i8* %call to %swift_error*
-  store %swift_error* %call.0, %swift_error** %error_ptr_ref
-  %tmp = getelementptr inbounds i8, i8* %call, i64 8
-  store i8 1, i8* %tmp
+  %call = call ptr @malloc(i64 16)
+  store ptr %call, ptr %error_ptr_ref
+  %tmp = getelementptr inbounds i8, ptr %call, i64 8
+  store i8 1, ptr %tmp
   ret float 1.0
 
 normal:
@@ -124,7 +120,7 @@ normal:
 
 ; "foo_loop" is a function that takes a swifterror parameter, it sets swifterror
 ; under a certain condition inside a loop.
-define float @foo_loop(%swift_error** swifterror %error_ptr_ref, i32 %cc, float %cc2) {
+define float @foo_loop(ptr swifterror %error_ptr_ref, i32 %cc, float %cc2) {
 ; CHECK-LABEL: foo_loop:
 ; CHECK: cbz
 ; CHECK: mov w0, #16
@@ -141,11 +137,10 @@ bb_loop:
   br i1 %cond, label %gen_error, label %bb_cont
 
 gen_error:
-  %call = call i8* @malloc(i64 16)
-  %call.0 = bitcast i8* %call to %swift_error*
-  store %swift_error* %call.0, %swift_error** %error_ptr_ref
-  %tmp = getelementptr inbounds i8, i8* %call, i64 8
-  store i8 1, i8* %tmp
+  %call = call ptr @malloc(i64 16)
+  store ptr %call, ptr %error_ptr_ref
+  %tmp = getelementptr inbounds i8, ptr %call, i64 8
+  store i8 1, ptr %tmp
   br label %bb_cont
 
 bb_cont:
@@ -159,7 +154,7 @@ bb_end:
 
 ; "foo_sret" is a function that takes a swifterror parameter, it also has a sret
 ; parameter.
-define void @foo_sret(%struct.S* sret(%struct.S) %agg.result, i32 %val1, %swift_error** swifterror %error_ptr_ref) {
+define void @foo_sret(ptr sret(%struct.S) %agg.result, i32 %val1, ptr swifterror %error_ptr_ref) {
 ; CHECK-LABEL: foo_sret:
 ; CHECK-DAG: mov [[SRET:x[0-9]+]], x8
 ; CHECK-DAG: mov w0, #16
@@ -171,18 +166,17 @@ define void @foo_sret(%struct.S* sret(%struct.S) %agg.result, i32 %val1, %swift_
 ; CHECK-NOT: x21
 
 entry:
-  %call = call i8* @malloc(i64 16)
-  %call.0 = bitcast i8* %call to %swift_error*
-  store %swift_error* %call.0, %swift_error** %error_ptr_ref
-  %tmp = getelementptr inbounds i8, i8* %call, i64 8
-  store i8 1, i8* %tmp
-  %v2 = getelementptr inbounds %struct.S, %struct.S* %agg.result, i32 0, i32 1
-  store i32 %val1, i32* %v2
+  %call = call ptr @malloc(i64 16)
+  store ptr %call, ptr %error_ptr_ref
+  %tmp = getelementptr inbounds i8, ptr %call, i64 8
+  store i8 1, ptr %tmp
+  %v2 = getelementptr inbounds %struct.S, ptr %agg.result, i32 0, i32 1
+  store i32 %val1, ptr %v2
   ret void
 }
 
 ; "caller3" calls "foo_sret" that takes a swifterror parameter.
-define float @caller3(i8* %error_ref) {
+define float @caller3(ptr %error_ref) {
 ; CHECK-LABEL: caller3:
 ; CHECK: mov [[ID:x[0-9]+]], x0
 ; CHECK: mov [[ZERO:x[0-9]+]], xzr
@@ -196,27 +190,26 @@ define float @caller3(i8* %error_ref) {
 
 entry:
   %s = alloca %struct.S, align 8
-  %error_ptr_ref = alloca swifterror %swift_error*
-  store %swift_error* null, %swift_error** %error_ptr_ref
-  call void @foo_sret(%struct.S* sret(%struct.S) %s, i32 1, %swift_error** swifterror %error_ptr_ref)
-  %error_from_foo = load %swift_error*, %swift_error** %error_ptr_ref
-  %had_error_from_foo = icmp ne %swift_error* %error_from_foo, null
-  %tmp = bitcast %swift_error* %error_from_foo to i8*
+  %error_ptr_ref = alloca swifterror ptr
+  store ptr null, ptr %error_ptr_ref
+  call void @foo_sret(ptr sret(%struct.S) %s, i32 1, ptr swifterror %error_ptr_ref)
+  %error_from_foo = load ptr, ptr %error_ptr_ref
+  %had_error_from_foo = icmp ne ptr %error_from_foo, null
   br i1 %had_error_from_foo, label %handler, label %cont
 cont:
-  %v1 = getelementptr inbounds %swift_error, %swift_error* %error_from_foo, i64 0, i32 1
-  %t = load i8, i8* %v1
-  store i8 %t, i8* %error_ref
+  %v1 = getelementptr inbounds %swift_error, ptr %error_from_foo, i64 0, i32 1
+  %t = load i8, ptr %v1
+  store i8 %t, ptr %error_ref
   br label %handler
 handler:
-  call void @free(i8* %tmp)
+  call void @free(ptr %error_from_foo)
   ret float 1.0
 }
 
 ; "foo_vararg" is a function that takes a swifterror parameter, it also has
 ; variable number of arguments.
-declare void @llvm.va_start(i8*) nounwind
-define float @foo_vararg(%swift_error** swifterror %error_ptr_ref, ...) {
+declare void @llvm.va_start(ptr) nounwind
+define float @foo_vararg(ptr swifterror %error_ptr_ref, ...) {
 ; CHECK-LABEL: foo_vararg:
 ; CHECK: mov w0, #16
 ; CHECK: malloc
@@ -236,30 +229,28 @@ define float @foo_vararg(%swift_error** swifterror %error_ptr_ref, ...) {
 ; CHECK: ldr {{w[0-9]+}}, [x[[ARG1]]]
 ; CHECK-NOT: x21
 entry:
-  %call = call i8* @malloc(i64 16)
-  %call.0 = bitcast i8* %call to %swift_error*
-  store %swift_error* %call.0, %swift_error** %error_ptr_ref
-  %tmp = getelementptr inbounds i8, i8* %call, i64 8
-  store i8 1, i8* %tmp
+  %call = call ptr @malloc(i64 16)
+  store ptr %call, ptr %error_ptr_ref
+  %tmp = getelementptr inbounds i8, ptr %call, i64 8
+  store i8 1, ptr %tmp
 
-  %args = alloca i8*, align 8
+  %args = alloca ptr, align 8
   %a10 = alloca i32, align 4
   %a11 = alloca i32, align 4
   %a12 = alloca i32, align 4
-  %v10 = bitcast i8** %args to i8*
-  call void @llvm.va_start(i8* %v10)
-  %v11 = va_arg i8** %args, i32
-  store i32 %v11, i32* %a10, align 4
-  %v12 = va_arg i8** %args, i32
-  store i32 %v12, i32* %a11, align 4
-  %v13 = va_arg i8** %args, i32
-  store i32 %v13, i32* %a12, align 4
+  call void @llvm.va_start(ptr %args)
+  %v11 = va_arg ptr %args, i32
+  store i32 %v11, ptr %a10, align 4
+  %v12 = va_arg ptr %args, i32
+  store i32 %v12, ptr %a11, align 4
+  %v13 = va_arg ptr %args, i32
+  store i32 %v13, ptr %a12, align 4
 
   ret float 1.0
 }
 
 ; "caller4" calls "foo_vararg" that takes a swifterror parameter.
-define float @caller4(i8* %error_ref) {
+define float @caller4(ptr %error_ref) {
 ; CHECK-LABEL: caller4:
 
 ; CHECK: mov x21, xzr
@@ -275,44 +266,43 @@ define float @caller4(i8* %error_ref) {
 ; CHECK: strb [[CODE]], [{{.*}}[[ID]]]
 ; CHECK: bl {{.*}}free
 entry:
-  %error_ptr_ref = alloca swifterror %swift_error*
-  store %swift_error* null, %swift_error** %error_ptr_ref
+  %error_ptr_ref = alloca swifterror ptr
+  store ptr null, ptr %error_ptr_ref
 
   %a10 = alloca i32, align 4
   %a11 = alloca i32, align 4
   %a12 = alloca i32, align 4
-  store i32 10, i32* %a10, align 4
-  store i32 11, i32* %a11, align 4
-  store i32 12, i32* %a12, align 4
-  %v10 = load i32, i32* %a10, align 4
-  %v11 = load i32, i32* %a11, align 4
-  %v12 = load i32, i32* %a12, align 4
-
-  %call = call float (%swift_error**, ...) @foo_vararg(%swift_error** swifterror %error_ptr_ref, i32 %v10, i32 %v11, i32 %v12)
-  %error_from_foo = load %swift_error*, %swift_error** %error_ptr_ref
-  %had_error_from_foo = icmp ne %swift_error* %error_from_foo, null
-  %tmp = bitcast %swift_error* %error_from_foo to i8*
+  store i32 10, ptr %a10, align 4
+  store i32 11, ptr %a11, align 4
+  store i32 12, ptr %a12, align 4
+  %v10 = load i32, ptr %a10, align 4
+  %v11 = load i32, ptr %a11, align 4
+  %v12 = load i32, ptr %a12, align 4
+
+  %call = call float (ptr, ...) @foo_vararg(ptr swifterror %error_ptr_ref, i32 %v10, i32 %v11, i32 %v12)
+  %error_from_foo = load ptr, ptr %error_ptr_ref
+  %had_error_from_foo = icmp ne ptr %error_from_foo, null
   br i1 %had_error_from_foo, label %handler, label %cont
 
 cont:
-  %v1 = getelementptr inbounds %swift_error, %swift_error* %error_from_foo, i64 0, i32 1
-  %t = load i8, i8* %v1
-  store i8 %t, i8* %error_ref
+  %v1 = getelementptr inbounds %swift_error, ptr %error_from_foo, i64 0, i32 1
+  %t = load i8, ptr %v1
+  store i8 %t, ptr %error_ref
   br label %handler
 handler:
-  call void @free(i8* %tmp)
+  call void @free(ptr %error_from_foo)
   ret float 1.0
 }
 
 ; Check that we don't blow up on tail calling swifterror argument functions.
-define float @tailcallswifterror(%swift_error** swifterror %error_ptr_ref) {
+define float @tailcallswifterror(ptr swifterror %error_ptr_ref) {
 entry:
-  %0 = tail call float @tailcallswifterror(%swift_error** swifterror %error_ptr_ref)
+  %0 = tail call float @tailcallswifterror(ptr swifterror %error_ptr_ref)
   ret float %0
 }
-define swiftcc float @tailcallswifterror_swiftcc(%swift_error** swifterror %error_ptr_ref) {
+define swiftcc float @tailcallswifterror_swiftcc(ptr swifterror %error_ptr_ref) {
 entry:
-  %0 = tail call swiftcc float @tailcallswifterror_swiftcc(%swift_error** swifterror %error_ptr_ref)
+  %0 = tail call swiftcc float @tailcallswifterror_swiftcc(ptr swifterror %error_ptr_ref)
   ret float %0
 }
 
@@ -372,14 +362,14 @@ entry:
 ; CHECK:  ldr     x28, [sp
 ; CHECK-NOT: x21
 ; CHECK:  ret
-define swiftcc void @params_in_reg(i64, i64, i64, i64, i64, i64, i64, i64, i8*, %swift_error** nocapture swifterror %err) {
-  %error_ptr_ref = alloca swifterror %swift_error*, align 8
-  store %swift_error* null, %swift_error** %error_ptr_ref
-  call swiftcc void @params_in_reg2(i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7, i64 8, i8*  null, %swift_error** nocapture swifterror %error_ptr_ref)
-  call swiftcc void @params_in_reg2(i64 %0, i64 %1, i64 %2, i64 %3, i64 %4, i64 %5, i64 %6, i64 %7, i8*  %8, %swift_error** nocapture swifterror %err)
+define swiftcc void @params_in_reg(i64, i64, i64, i64, i64, i64, i64, i64, ptr, ptr nocapture swifterror %err) {
+  %error_ptr_ref = alloca swifterror ptr, align 8
+  store ptr null, ptr %error_ptr_ref
+  call swiftcc void @params_in_reg2(i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7, i64 8, ptr  null, ptr nocapture swifterror %error_ptr_ref)
+  call swiftcc void @params_in_reg2(i64 %0, i64 %1, i64 %2, i64 %3, i64 %4, i64 %5, i64 %6, i64 %7, ptr  %8, ptr nocapture swifterror %err)
   ret void
 }
-declare swiftcc void @params_in_reg2(i64, i64, i64, i64, i64, i64, i64, i64, i8* , %swift_error** nocapture swifterror %err)
+declare swiftcc void @params_in_reg2(i64, i64, i64, i64, i64, i64, i64, i64, ptr , ptr nocapture swifterror %err)
 
 ; CHECK-LABEL: params_and_return_in_reg
 ; Store callee saved registers.
@@ -462,18 +452,18 @@ declare swiftcc void @params_in_reg2(i64, i64, i64, i64, i64, i64, i64, i64, i8*
 ; CHECK:  ldp     x27, x26, [sp
 ; CHECK:  ldr     x28, [sp
 ; CHECK:  ret
-define swiftcc { i64, i64, i64, i64, i64, i64, i64, i64 } @params_and_return_in_reg(i64, i64, i64, i64, i64, i64, i64, i64, i8* , %swift_error** nocapture swifterror %err) {
-  %error_ptr_ref = alloca swifterror %swift_error*, align 8
-  store %swift_error* null, %swift_error** %error_ptr_ref
-  call swiftcc void @params_in_reg2(i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7, i64 8, i8*  null, %swift_error** nocapture swifterror %error_ptr_ref)
-  %val = call swiftcc  { i64, i64, i64, i64, i64, i64, i64, i64 } @params_and_return_in_reg2(i64 %0, i64 %1, i64 %2, i64 %3, i64 %4, i64 %5, i64 %6, i64 %7, i8*  %8, %swift_error** nocapture swifterror %err)
-  call swiftcc void @params_in_reg2(i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7, i64 8, i8*  null, %swift_error** nocapture swifterror %error_ptr_ref)
+define swiftcc { i64, i64, i64, i64, i64, i64, i64, i64 } @params_and_return_in_reg(i64, i64, i64, i64, i64, i64, i64, i64, ptr , ptr nocapture swifterror %err) {
+  %error_ptr_ref = alloca swifterror ptr, align 8
+  store ptr null, ptr %error_ptr_ref
+  call swiftcc void @params_in_reg2(i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7, i64 8, ptr  null, ptr nocapture swifterror %error_ptr_ref)
+  %val = call swiftcc  { i64, i64, i64, i64, i64, i64, i64, i64 } @params_and_return_in_reg2(i64 %0, i64 %1, i64 %2, i64 %3, i64 %4, i64 %5, i64 %6, i64 %7, ptr  %8, ptr nocapture swifterror %err)
+  call swiftcc void @params_in_reg2(i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7, i64 8, ptr  null, ptr nocapture swifterror %error_ptr_ref)
   ret { i64, i64, i64, i64, i64, i64, i64, i64 } %val
 }
 
-declare swiftcc { i64, i64, i64, i64, i64, i64, i64, i64 } @params_and_return_in_reg2(i64, i64, i64, i64, i64, i64, i64, i64, i8* , %swift_error** nocapture swifterror %err)
+declare swiftcc { i64, i64, i64, i64, i64, i64, i64, i64 } @params_and_return_in_reg2(i64, i64, i64, i64, i64, i64, i64, i64, ptr , ptr nocapture swifterror %err)
 
-declare void @acallee(i8*)
+declare void @acallee(ptr)
 
 ; Make sure we don't tail call if the caller returns a swifterror value. We
 ; would have to move into the swifterror register before the tail call.
@@ -481,9 +471,9 @@ declare void @acallee(i8*)
 ; CHECK-NOT: b _acallee
 ; CHECK: bl _acallee
 
-define swiftcc void @tailcall_from_swifterror(%swift_error** swifterror %error_ptr_ref) {
+define swiftcc void @tailcall_from_swifterror(ptr swifterror %error_ptr_ref) {
 entry:
-  tail call void @acallee(i8* null)
+  tail call void @acallee(ptr null)
   ret void
 }
 
@@ -491,32 +481,32 @@ entry:
 ; CHECK-NOT: b _simple_fn
 ; CHECK: bl _simple_fn
 declare void @simple_fn()
-define swiftcc void @tailcall_from_swifterror2(%swift_error** swifterror %error_ptr_ref) {
+define swiftcc void @tailcall_from_swifterror2(ptr swifterror %error_ptr_ref) {
   tail call void @simple_fn()
   ret void
 }
 
-declare swiftcc void @foo2(%swift_error** swifterror)
+declare swiftcc void @foo2(ptr swifterror)
 ; CHECK-LABEL: testAssign
 ; CHECK: mov      x21, xzr
 ; CHECK: bl      _foo2
 ; CHECK: mov      x0, x21
 
-define swiftcc %swift_error* @testAssign(i8* %error_ref) {
+define swiftcc ptr @testAssign(ptr %error_ref) {
 entry:
-  %error_ptr = alloca swifterror %swift_error*
-  store %swift_error* null, %swift_error** %error_ptr
-  call swiftcc void @foo2(%swift_error** swifterror %error_ptr)
+  %error_ptr = alloca swifterror ptr
+  store ptr null, ptr %error_ptr
+  call swiftcc void @foo2(ptr swifterror %error_ptr)
   br label %a
 
 a:
-  %error = load %swift_error*, %swift_error** %error_ptr
-  ret %swift_error* %error
+  %error = load ptr, ptr %error_ptr
+  ret ptr %error
 }
 
 ; foo takes a swifterror parameter. We should be able to see that even when
 ; it isn't explicitly on the call.
-define float @swifterror_param_not_on_call(i8* %error_ref) {
+define float @swifterror_param_not_on_call(ptr %error_ref) {
 ; CHECK-LABEL: swifterror_param_not_on_call:
 ; CHECK: mov [[ID:x[0-9]+]], x0
 ; CHECK: bl {{.*}}foo
@@ -528,26 +518,25 @@ define float @swifterror_param_not_on_call(i8* %error_ref) {
 ; CHECK: bl {{.*}}free
 
 entry:
-  %error_ptr_ref = alloca swifterror %swift_error*
-  store %swift_error* null, %swift_error** %error_ptr_ref
-  %call = call float @foo(%swift_error** %error_ptr_ref)
-  %error_from_foo = load %swift_error*, %swift_error** %error_ptr_ref
-  %had_error_from_foo = icmp ne %swift_error* %error_from_foo, null
-  %tmp = bitcast %swift_error* %error_from_foo to i8*
+  %error_ptr_ref = alloca swifterror ptr
+  store ptr null, ptr %error_ptr_ref
+  %call = call float @foo(ptr %error_ptr_ref)
+  %error_from_foo = load ptr, ptr %error_ptr_ref
+  %had_error_from_foo = icmp ne ptr %error_from_foo, null
   br i1 %had_error_from_foo, label %handler, label %cont
 cont:
-  %v1 = getelementptr inbounds %swift_error, %swift_error* %error_from_foo, i64 0, i32 1
-  %t = load i8, i8* %v1
-  store i8 %t, i8* %error_ref
+  %v1 = getelementptr inbounds %swift_error, ptr %error_from_foo, i64 0, i32 1
+  %t = load i8, ptr %v1
+  store i8 %t, ptr %error_ref
   br label %handler
 handler:
-  call void @free(i8* %tmp)
+  call void @free(ptr %error_from_foo)
   ret float 1.0
 }
 
 ; foo_sret takes an sret parameter and a swifterror parameter. We should be
 ; able to see that, even if it's not explicitly on the call.
-define float @swifterror_param_not_on_call2(i8* %error_ref) {
+define float @swifterror_param_not_on_call2(ptr %error_ref) {
 ; CHECK-LABEL: swifterror_param_not_on_call2:
 ; CHECK: mov [[ID:x[0-9]+]], x0
 ; CHECK: mov [[ZERO:x[0-9]+]], xzr
@@ -561,19 +550,18 @@ define float @swifterror_param_not_on_call2(i8* %error_ref) {
 
 entry:
   %s = alloca %struct.S, align 8
-  %error_ptr_ref = alloca swifterror %swift_error*
-  store %swift_error* null, %swift_error** %error_ptr_ref
-  call void @foo_sret(%struct.S* %s, i32 1, %swift_error** %error_ptr_ref)
-  %error_from_foo = load %swift_error*, %swift_error** %error_ptr_ref
-  %had_error_from_foo = icmp ne %swift_error* %error_from_foo, null
-  %tmp = bitcast %swift_error* %error_from_foo to i8*
+  %error_ptr_ref = alloca swifterror ptr
+  store ptr null, ptr %error_ptr_ref
+  call void @foo_sret(ptr %s, i32 1, ptr %error_ptr_ref)
+  %error_from_foo = load ptr, ptr %error_ptr_ref
+  %had_error_from_foo = icmp ne ptr %error_from_foo, null
   br i1 %had_error_from_foo, label %handler, label %cont
 cont:
-  %v1 = getelementptr inbounds %swift_error, %swift_error* %error_from_foo, i64 0, i32 1
-  %t = load i8, i8* %v1
-  store i8 %t, i8* %error_ref
+  %v1 = getelementptr inbounds %swift_error, ptr %error_from_foo, i64 0, i32 1
+  %t = load i8, ptr %v1
+  store i8 %t, ptr %error_ref
   br label %handler
 handler:
-  call void @free(i8* %tmp)
+  call void @free(ptr %error_from_foo)
   ret float 1.0
 }
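
The handler change above is the representative opaque-pointer cleanup in this
file: a bitcast between pointer types is a no-op once every pointer is plain
ptr, so the intermediate %tmp disappears and @free takes the loaded error
value directly. A minimal before/after sketch of the same rewrite (names
taken from the test; this is an illustration, not part of the commit):

  ; typed pointers
  %tmp = bitcast %swift_error* %error_from_foo to i8*
  call void @free(i8* %tmp)

  ; opaque pointers
  call void @free(ptr %error_from_foo)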

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/swiftself.ll b/llvm/test/CodeGen/AArch64/GlobalISel/swiftself.ll
index 65ddcd9d8b00..b8c9102af3bb 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/swiftself.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/swiftself.ll
@@ -4,8 +4,8 @@
 ; CHECK-LABEL: swiftself_param:
 ; CHECK: mov x0, x20
 ; CHECK-NEXT: ret
-define i8* @swiftself_param(i8* swiftself %addr0) {
-  ret i8 *%addr0
+define ptr @swiftself_param(ptr swiftself %addr0) {
+  ret ptr %addr0
 }
 
 ; Check that x20 is used to pass a swiftself argument.
@@ -13,9 +13,9 @@ define i8* @swiftself_param(i8* swiftself %addr0) {
 ; CHECK: mov x20, x0
 ; CHECK: bl {{_?}}swiftself_param
 ; CHECK: ret
-define i8 *@call_swiftself(i8* %arg) {
-  %res = call i8 *@swiftself_param(i8* swiftself %arg)
-  ret i8 *%res
+define ptr @call_swiftself(ptr %arg) {
+  %res = call ptr @swiftself_param(ptr swiftself %arg)
+  ret ptr %res
 }
 
 ; Demonstrate that we do not need any movs when calling multiple functions
@@ -26,9 +26,9 @@ define i8 *@call_swiftself(i8* %arg) {
 ; CHECK-NOT: mov{{.*}}x20
 ; CHECK-NEXT: bl {{_?}}swiftself_param
 ; CHECK: ret
-define void @swiftself_passthrough(i8* swiftself %addr0) {
-  call i8 *@swiftself_param(i8* swiftself %addr0)
-  call i8 *@swiftself_param(i8* swiftself %addr0)
+define void @swiftself_passthrough(ptr swiftself %addr0) {
+  call ptr @swiftself_param(ptr swiftself %addr0)
+  call ptr @swiftself_param(ptr swiftself %addr0)
   ret void
 }
 
@@ -38,26 +38,26 @@ define void @swiftself_passthrough(i8* swiftself %addr0) {
 ; CHECK: mov x20, x0
 ; CHECK: bl {{_?}}swiftself_param
 ; CHECK: ret
-define i8* @swiftself_notail(i8* swiftself %addr0, i8* %addr1) nounwind {
-  %res = tail call i8* @swiftself_param(i8* swiftself %addr1)
-  ret i8* %res
+define ptr @swiftself_notail(ptr swiftself %addr0, ptr %addr1) nounwind {
+  %res = tail call ptr @swiftself_param(ptr swiftself %addr1)
+  ret ptr %res
 }
 
 ; We cannot pretend that 'x0' is alive across the thisreturn_attribute call as
 ; we normally would. We marked the first parameter with swiftself which means it
 ; will no longer be passed in x0.
-declare swiftcc i8* @thisreturn_attribute(i8* returned swiftself)
+declare swiftcc ptr @thisreturn_attribute(ptr returned swiftself)
 ; CHECK-LABEL: swiftself_nothisreturn:
 ; CHECK-DAG: ldr  x20, [x20]
 ; CHECK-DAG: mov [[CSREG:x[1-9].*]], x8
 ; CHECK: bl {{_?}}thisreturn_attribute
 ; CHECK: str x0, [[[CSREG]]
 ; CHECK: ret
-define hidden swiftcc void @swiftself_nothisreturn(i8** noalias nocapture sret(i8*), i8** noalias nocapture readonly swiftself) {
+define hidden swiftcc void @swiftself_nothisreturn(ptr noalias nocapture sret(ptr), ptr noalias nocapture readonly swiftself) {
 entry:
-  %2 = load i8*, i8** %1, align 8
-  %3 = tail call swiftcc i8* @thisreturn_attribute(i8* swiftself %2)
-  store i8* %3, i8** %0, align 8
+  %2 = load ptr, ptr %1, align 8
+  %3 = tail call swiftcc ptr @thisreturn_attribute(ptr swiftself %2)
+  store ptr %3, ptr %0, align 8
   ret void
 }
 
@@ -67,7 +67,7 @@ entry:
 ; CHECK: mov x20, x0
 ; CHECK: bl {{_?}}swiftself_param
 ; CHECK: ret
-define i8 *@swiftself_not_on_call_params(i8* %arg) {
-  %res = call i8 *@swiftself_param(i8* %arg)
-  ret i8 *%res
+define ptr @swiftself_not_on_call_params(ptr %arg) {
+  %res = call ptr @swiftself_param(ptr %arg)
+  ret ptr %res
 }

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/translate-constant-dag.ll b/llvm/test/CodeGen/AArch64/GlobalISel/translate-constant-dag.ll
index 1f91d8d06e8d..9a025aa35b87 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/translate-constant-dag.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/translate-constant-dag.ll
@@ -3,7 +3,7 @@
 
 %dag = type { { { i8, { i8 } }, { { i8, { i8 } }, { i8 } } }, { { i8, { i8 } }, { i8 } } }
 
-define void @test_const(%dag* %dst) {
+define void @test_const(ptr %dst) {
   ; CHECK-LABEL: name: test_const
   ; CHECK: bb.1.entry:
   ; CHECK:   liveins: $x0
@@ -63,7 +63,7 @@ entry:
    },
    0,
    1
- store %dag %updated, %dag* %dst
+ store %dag %updated, ptr %dst
  ; 10, 20, 10, 20, 50, 10, 20, 20 sequence is expected
 
  store
@@ -91,7 +91,7 @@ entry:
        { i8 } { i8 20 }
      }
    },
-   %dag* %dst
+   ptr %dst
  ; 10, 20, 10, 20, 20, 10, 20, 20 sequence is expected
  ret void
 }

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/translate-gep.ll b/llvm/test/CodeGen/AArch64/GlobalISel/translate-gep.ll
index d7c5629d1937..a916fb2bf123 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/translate-gep.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/translate-gep.ll
@@ -3,7 +3,7 @@
 
 %type = type [4 x {i8, i32}]
 
-define i8*  @translate_element_size1(i64 %arg) {
+define ptr @translate_element_size1(i64 %arg) {
   ; CHECK-LABEL: name: translate_element_size1
   ; CHECK: bb.1 (%ir-block.0):
   ; CHECK-NEXT:   liveins: $x0
@@ -14,11 +14,11 @@ define i8*  @translate_element_size1(i64 %arg) {
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(p0) = COPY [[PTR_ADD]](p0)
   ; CHECK-NEXT:   $x0 = COPY [[COPY1]](p0)
   ; CHECK-NEXT:   RET_ReallyLR implicit $x0
-  %tmp = getelementptr i8, i8* null, i64 %arg
-  ret i8* %tmp
+  %tmp = getelementptr i8, ptr null, i64 %arg
+  ret ptr %tmp
 }
 
-define %type* @first_offset_const(%type* %addr) {
+define ptr @first_offset_const(ptr %addr) {
 
   ; CHECK-LABEL: name: first_offset_const
   ; CHECK: bb.1 (%ir-block.0):
@@ -29,11 +29,11 @@ define %type* @first_offset_const(%type* %addr) {
   ; CHECK-NEXT:   [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
   ; CHECK-NEXT:   $x0 = COPY [[PTR_ADD]](p0)
   ; CHECK-NEXT:   RET_ReallyLR implicit $x0
-  %res = getelementptr %type, %type* %addr, i32 1
-  ret %type* %res
+  %res = getelementptr %type, ptr %addr, i32 1
+  ret ptr %res
 }
 
-define %type* @first_offset_trivial(%type* %addr) {
+define ptr @first_offset_trivial(ptr %addr) {
 
   ; CHECK-LABEL: name: first_offset_trivial
   ; CHECK: bb.1 (%ir-block.0):
@@ -43,11 +43,11 @@ define %type* @first_offset_trivial(%type* %addr) {
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(p0) = COPY [[COPY]](p0)
   ; CHECK-NEXT:   $x0 = COPY [[COPY1]](p0)
   ; CHECK-NEXT:   RET_ReallyLR implicit $x0
-  %res = getelementptr %type, %type* %addr, i32 0
-  ret %type* %res
+  %res = getelementptr %type, ptr %addr, i32 0
+  ret ptr %res
 }
 
-define %type* @first_offset_variable(%type* %addr, i64 %idx) {
+define ptr @first_offset_variable(ptr %addr, i64 %idx) {
 
   ; CHECK-LABEL: name: first_offset_variable
   ; CHECK: bb.1 (%ir-block.0):
@@ -61,11 +61,11 @@ define %type* @first_offset_variable(%type* %addr, i64 %idx) {
   ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(p0) = COPY [[PTR_ADD]](p0)
   ; CHECK-NEXT:   $x0 = COPY [[COPY2]](p0)
   ; CHECK-NEXT:   RET_ReallyLR implicit $x0
-  %res = getelementptr %type, %type* %addr, i64 %idx
-  ret %type* %res
+  %res = getelementptr %type, ptr %addr, i64 %idx
+  ret ptr %res
 }
 
-define %type* @first_offset_ext(%type* %addr, i32 %idx) {
+define ptr @first_offset_ext(ptr %addr, i32 %idx) {
 
   ; CHECK-LABEL: name: first_offset_ext
   ; CHECK: bb.1 (%ir-block.0):
@@ -80,12 +80,12 @@ define %type* @first_offset_ext(%type* %addr, i32 %idx) {
   ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(p0) = COPY [[PTR_ADD]](p0)
   ; CHECK-NEXT:   $x0 = COPY [[COPY2]](p0)
   ; CHECK-NEXT:   RET_ReallyLR implicit $x0
-  %res = getelementptr %type, %type* %addr, i32 %idx
-  ret %type* %res
+  %res = getelementptr %type, ptr %addr, i32 %idx
+  ret ptr %res
 }
 
 %type1 = type [4 x [4 x i32]]
-define i32* @const_then_var(%type1* %addr, i64 %idx) {
+define ptr @const_then_var(ptr %addr, i64 %idx) {
 
   ; CHECK-LABEL: name: const_then_var
   ; CHECK: bb.1 (%ir-block.0):
@@ -101,11 +101,11 @@ define i32* @const_then_var(%type1* %addr, i64 %idx) {
   ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(p0) = COPY [[PTR_ADD1]](p0)
   ; CHECK-NEXT:   $x0 = COPY [[COPY2]](p0)
   ; CHECK-NEXT:   RET_ReallyLR implicit $x0
-  %res = getelementptr %type1, %type1* %addr, i32 4, i32 1, i64 %idx
-  ret i32* %res
+  %res = getelementptr %type1, ptr %addr, i32 4, i32 1, i64 %idx
+  ret ptr %res
 }
 
-define i32* @var_then_const(%type1* %addr, i64 %idx) {
+define ptr @var_then_const(ptr %addr, i64 %idx) {
 
   ; CHECK-LABEL: name: var_then_const
   ; CHECK: bb.1 (%ir-block.0):
@@ -120,13 +120,13 @@ define i32* @var_then_const(%type1* %addr, i64 %idx) {
   ; CHECK-NEXT:   [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C1]](s64)
   ; CHECK-NEXT:   $x0 = COPY [[PTR_ADD1]](p0)
   ; CHECK-NEXT:   RET_ReallyLR implicit $x0
-  %res = getelementptr %type1, %type1* %addr, i64 %idx, i32 2, i32 2
-  ret i32* %res
+  %res = getelementptr %type1, ptr %addr, i64 %idx, i32 2, i32 2
+  ret ptr %res
 }
 
 @arr = external global [8 x i32]
 
-define <2 x i32*> @vec_gep_scalar_base(<2 x i64> %offs) {
+define <2 x ptr> @vec_gep_scalar_base(<2 x i64> %offs) {
   ; CHECK-LABEL: name: vec_gep_scalar_base
   ; CHECK: bb.1.entry:
   ; CHECK-NEXT:   liveins: $q0
@@ -142,6 +142,6 @@ define <2 x i32*> @vec_gep_scalar_base(<2 x i64> %offs) {
   ; CHECK-NEXT:   $q0 = COPY [[COPY1]](<2 x p0>)
   ; CHECK-NEXT:   RET_ReallyLR implicit $q0
 entry:
-  %0 = getelementptr inbounds [8 x i32], [8 x i32]* @arr, i64 0, <2 x i64> %offs
-  ret <2 x i32*> %0
+  %0 = getelementptr inbounds [8 x i32], ptr @arr, i64 0, <2 x i64> %offs
+  ret <2 x ptr> %0
 }

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/translate-ret.ll b/llvm/test/CodeGen/AArch64/GlobalISel/translate-ret.ll
index eb99454857f0..cf620cc52b90 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/translate-ret.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/translate-ret.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
 ; RUN: llc -mtriple=arm64-apple-ios %s -o - -global-isel -global-isel-abort=1 -stop-after=irtranslator | FileCheck %s
 
-define i128 @func_i128(i128* %ptr) {
+define i128 @func_i128(ptr %ptr) {
 
   ; CHECK-LABEL: name: func_i128
   ; CHECK: bb.1 (%ir-block.0):
@@ -13,11 +13,11 @@ define i128 @func_i128(i128* %ptr) {
   ; CHECK-NEXT:   $x0 = COPY [[UV]](s64)
   ; CHECK-NEXT:   $x1 = COPY [[UV1]](s64)
   ; CHECK-NEXT:   RET_ReallyLR implicit $x0, implicit $x1
-  %val = load i128, i128* %ptr
+  %val = load i128, ptr %ptr
   ret i128 %val
 }
 
-define <8 x float> @func_v8f32(<8 x float>* %ptr) {
+define <8 x float> @func_v8f32(ptr %ptr) {
 
   ; CHECK-LABEL: name: func_v8f32
   ; CHECK: bb.1 (%ir-block.0):
@@ -29,12 +29,12 @@ define <8 x float> @func_v8f32(<8 x float>* %ptr) {
   ; CHECK-NEXT:   $q0 = COPY [[UV]](<4 x s32>)
   ; CHECK-NEXT:   $q1 = COPY [[UV1]](<4 x s32>)
   ; CHECK-NEXT:   RET_ReallyLR implicit $q0, implicit $q1
-  %val = load <8 x float>, <8 x float>* %ptr
+  %val = load <8 x float>, ptr %ptr
   ret <8 x float> %val
 }
 
 ; A bit weird, but s0-s5 is what SDAG does too.
-define <6 x float> @func_v6f32(<6 x float>* %ptr) {
+define <6 x float> @func_v6f32(ptr %ptr) {
 
   ; CHECK-LABEL: name: func_v6f32
   ; CHECK: bb.1 (%ir-block.0):
@@ -50,7 +50,7 @@ define <6 x float> @func_v6f32(<6 x float>* %ptr) {
   ; CHECK-NEXT:   $s4 = COPY [[UV4]](s32)
   ; CHECK-NEXT:   $s5 = COPY [[UV5]](s32)
   ; CHECK-NEXT:   RET_ReallyLR implicit $s0, implicit $s1, implicit $s2, implicit $s3, implicit $s4, implicit $s5
-  %val = load <6 x float>, <6 x float>* %ptr
+  %val = load <6 x float>, ptr %ptr
   ret <6 x float> %val
 }
 

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/unknown-intrinsic.ll b/llvm/test/CodeGen/AArch64/GlobalISel/unknown-intrinsic.ll
index 34b2a5626cc2..21bbecb9760e 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/unknown-intrinsic.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/unknown-intrinsic.ll
@@ -1,10 +1,10 @@
 ; RUN: llc -O0 -mtriple=arm64 < %s
 
-declare i8* @llvm.launder.invariant.group(i8*)
+declare ptr @llvm.launder.invariant.group(ptr)
 
-define i8* @barrier(i8* %p) {
+define ptr @barrier(ptr %p) {
 ; CHECK: bl llvm.launder.invariant.group
-        %q = call i8* @llvm.launder.invariant.group(i8* %p)
-        ret i8* %q
+        %q = call ptr @llvm.launder.invariant.group(ptr %p)
+        ret ptr %q
 }
 

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/unwind-inline-asm.ll b/llvm/test/CodeGen/AArch64/GlobalISel/unwind-inline-asm.ll
index f32c34a93ccb..7426fe988da8 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/unwind-inline-asm.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/unwind-inline-asm.ll
@@ -10,7 +10,7 @@ entry:
   unreachable
 }
 
-define dso_local void @test() personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+define dso_local void @test() personality ptr @__gxx_personality_v0 {
 entry:
 
 ; CHECK-LABEL: test:
@@ -25,17 +25,17 @@ invoke.cont:
   ret void
 
 lpad:
-  %0 = landingpad { i8*, i32 }
+  %0 = landingpad { ptr, i32 }
           cleanup
 ; CHECK: bl	printf
-  call void (i8*, ...) @printf(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @.str.2, i64 0, i64 0))
-  resume { i8*, i32 } %0
+  call void (ptr, ...) @printf(ptr @.str.2)
+  resume { ptr, i32 } %0
 
 }
 
 declare dso_local i32 @__gxx_personality_v0(...)
 
-declare dso_local void @printf(i8*, ...)
+declare dso_local void @printf(ptr, ...)
 
 ; Exception table generation around the inline assembly
 

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/v8.4-atomic-128.ll b/llvm/test/CodeGen/AArch64/GlobalISel/v8.4-atomic-128.ll
index 397d69e93c5f..fe7e24c2d8ba 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/v8.4-atomic-128.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/v8.4-atomic-128.ll
@@ -1,212 +1,192 @@
 ; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+v8.4a %s -o - -global-isel=1 -global-isel-abort=1 | FileCheck %s
 ; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+lse2 %s -o - -global-isel=1 -global-isel-abort=1 | FileCheck %s
 
-define void @test_atomic_load(i128* %addr) {
+define void @test_atomic_load(ptr %addr) {
 ; CHECK-LABEL: test_atomic_load:
 
 ; CHECK: ldp [[LO:x[0-9]+]], [[HI:x[0-9]+]], [x0]
 ; CHECK: mov v[[Q:[0-9]+]].d[0], [[LO]]
 ; CHECK: mov v[[Q]].d[1], [[HI]]
 ; CHECK: str q[[Q]], [x0]
-  %res.0 = load atomic i128, i128* %addr monotonic, align 16
-  store i128 %res.0, i128* %addr
+  %res.0 = load atomic i128, ptr %addr monotonic, align 16
+  store i128 %res.0, ptr %addr
 
 ; CHECK: ldp [[LO:x[0-9]+]], [[HI:x[0-9]+]], [x0]
 ; CHECK: mov v[[Q:[0-9]+]].d[0], [[LO]]
 ; CHECK: mov v[[Q]].d[1], [[HI]]
 ; CHECK: str q[[Q]], [x0]
-  %res.1 = load atomic i128, i128* %addr unordered, align 16
-  store i128 %res.1, i128* %addr
+  %res.1 = load atomic i128, ptr %addr unordered, align 16
+  store i128 %res.1, ptr %addr
 
 ; CHECK: ldp [[LO:x[0-9]+]], [[HI:x[0-9]+]], [x0]
 ; CHECK: dmb ish
 ; CHECK: mov v[[Q:[0-9]+]].d[0], [[LO]]
 ; CHECK: mov v[[Q]].d[1], [[HI]]
 ; CHECK: str q[[Q]], [x0]
-  %res.2 = load atomic i128, i128* %addr acquire, align 16
-  store i128 %res.2, i128* %addr
+  %res.2 = load atomic i128, ptr %addr acquire, align 16
+  store i128 %res.2, ptr %addr
 
 ; CHECK: ldp [[LO:x[0-9]+]], [[HI:x[0-9]+]], [x0]
 ; CHECK: dmb ish
 ; CHECK: mov v[[Q:[0-9]+]].d[0], [[LO]]
 ; CHECK: mov v[[Q]].d[1], [[HI]]
 ; CHECK: str q[[Q]], [x0]
-  %res.3 = load atomic i128, i128* %addr seq_cst, align 16
-  store i128 %res.3, i128* %addr
+  %res.3 = load atomic i128, ptr %addr seq_cst, align 16
+  store i128 %res.3, ptr %addr
 
-  %addr8 = bitcast i128* %addr to i8*
 
 ; CHECK: ldp [[LO:x[0-9]+]], [[HI:x[0-9]+]], [x0, #8]
 ; CHECK: mov v[[Q:[0-9]+]].d[0], [[LO]]
 ; CHECK: mov v[[Q]].d[1], [[HI]]
 ; CHECK: str q[[Q]], [x0]
-  %addr8.1 = getelementptr i8,  i8* %addr8, i32 8
-  %addr128.1 = bitcast i8* %addr8.1 to i128*
-  %res.5 = load atomic i128, i128* %addr128.1 monotonic, align 16
-  store i128 %res.5, i128* %addr
+  %addr8.1 = getelementptr i8,  ptr %addr, i32 8
+  %res.5 = load atomic i128, ptr %addr8.1 monotonic, align 16
+  store i128 %res.5, ptr %addr
 
 ; CHECK: ldp [[LO:x[0-9]+]], [[HI:x[0-9]+]], [x0, #504]
 ; CHECK: mov v[[Q:[0-9]+]].d[0], [[LO]]
 ; CHECK: mov v[[Q]].d[1], [[HI]]
 ; CHECK: str q[[Q]], [x0]
-  %addr8.2 = getelementptr i8,  i8* %addr8, i32 504
-  %addr128.2 = bitcast i8* %addr8.2 to i128*
-  %res.6 = load atomic i128, i128* %addr128.2 monotonic, align 16
-  store i128 %res.6, i128* %addr
+  %addr8.2 = getelementptr i8,  ptr %addr, i32 504
+  %res.6 = load atomic i128, ptr %addr8.2 monotonic, align 16
+  store i128 %res.6, ptr %addr
 
 ; CHECK: ldp [[LO:x[0-9]+]], [[HI:x[0-9]+]], [x0, #-512]
 ; CHECK: mov v[[Q:[0-9]+]].d[0], [[LO]]
 ; CHECK: mov v[[Q]].d[1], [[HI]]
 ; CHECK: str q[[Q]], [x0]
-  %addr8.3 = getelementptr i8,  i8* %addr8, i32 -512
-  %addr128.3 = bitcast i8* %addr8.3 to i128*
-  %res.7 = load atomic i128, i128* %addr128.3 monotonic, align 16
-  store i128 %res.7, i128* %addr
+  %addr8.3 = getelementptr i8,  ptr %addr, i32 -512
+  %res.7 = load atomic i128, ptr %addr8.3 monotonic, align 16
+  store i128 %res.7, ptr %addr
 
   ret void
 }
 
-define void @test_libcall_load(i128* %addr) {
+define void @test_libcall_load(ptr %addr) {
 ; CHECK-LABEL: test_libcall_load:
 ; CHECK: bl __atomic_load
-  %res.8 = load atomic i128, i128* %addr unordered, align 8
-  store i128 %res.8, i128* %addr
+  %res.8 = load atomic i128, ptr %addr unordered, align 8
+  store i128 %res.8, ptr %addr
 
   ret void
 }
 
-define void @test_nonfolded_load1(i128* %addr) {
+define void @test_nonfolded_load1(ptr %addr) {
 ; CHECK-LABEL: test_nonfolded_load1:
-  %addr8 = bitcast i128* %addr to i8*
 
 ; CHECK: add x[[ADDR:[0-9]+]], x0, #4
 ; CHECK: ldp [[LO:x[0-9]+]], [[HI:x[0-9]+]], [x[[ADDR]]]
 ; CHECK: mov v[[Q:[0-9]+]].d[0], [[LO]]
 ; CHECK: mov v[[Q]].d[1], [[HI]]
 ; CHECK: str q[[Q]], [x0]
-  %addr8.1 = getelementptr i8,  i8* %addr8, i32 4
-  %addr128.1 = bitcast i8* %addr8.1 to i128*
-  %res.1 = load atomic i128, i128* %addr128.1 monotonic, align 16
-  store i128 %res.1, i128* %addr
+  %addr8.1 = getelementptr i8,  ptr %addr, i32 4
+  %res.1 = load atomic i128, ptr %addr8.1 monotonic, align 16
+  store i128 %res.1, ptr %addr
 
   ret void
 }
 
-define void @test_nonfolded_load2(i128* %addr) {
+define void @test_nonfolded_load2(ptr %addr) {
 ; CHECK-LABEL: test_nonfolded_load2:
-  %addr8 = bitcast i128* %addr to i8*
 
 ; CHECK: add x[[ADDR:[0-9]+]], x0, #512
 ; CHECK: ldp [[LO:x[0-9]+]], [[HI:x[0-9]+]], [x[[ADDR]]]
 ; CHECK: mov v[[Q:[0-9]+]].d[0], [[LO]]
 ; CHECK: mov v[[Q]].d[1], [[HI]]
 ; CHECK: str q[[Q]], [x0]
-  %addr8.1 = getelementptr i8,  i8* %addr8, i32 512
-  %addr128.1 = bitcast i8* %addr8.1 to i128*
-  %res.1 = load atomic i128, i128* %addr128.1 monotonic, align 16
-  store i128 %res.1, i128* %addr
+  %addr8.1 = getelementptr i8,  ptr %addr, i32 512
+  %res.1 = load atomic i128, ptr %addr8.1 monotonic, align 16
+  store i128 %res.1, ptr %addr
 
   ret void
 }
 
-define void @test_nonfolded_load3(i128* %addr) {
+define void @test_nonfolded_load3(ptr %addr) {
 ; CHECK-LABEL: test_nonfolded_load3:
-  %addr8 = bitcast i128* %addr to i8*
 
 ; CHECK: sub x[[ADDR:[0-9]+]], x0, #520
 ; CHECK: ldp [[LO:x[0-9]+]], [[HI:x[0-9]+]], [x[[ADDR]]]
 ; CHECK: mov v[[Q:[0-9]+]].d[0], [[LO]]
 ; CHECK: mov v[[Q]].d[1], [[HI]]
 ; CHECK: str q[[Q]], [x0]
-  %addr8.1 = getelementptr i8,  i8* %addr8, i32 -520
-  %addr128.1 = bitcast i8* %addr8.1 to i128*
-  %res.1 = load atomic i128, i128* %addr128.1 monotonic, align 16
-  store i128 %res.1, i128* %addr
+  %addr8.1 = getelementptr i8,  ptr %addr, i32 -520
+  %res.1 = load atomic i128, ptr %addr8.1 monotonic, align 16
+  store i128 %res.1, ptr %addr
 
   ret void
 }
 
-define void @test_atomic_store(i128* %addr, i128 %val) {
+define void @test_atomic_store(ptr %addr, i128 %val) {
 ; CHECK-LABEL: test_atomic_store:
 
 ; CHECK: stp x2, x3, [x0]
-  store atomic i128 %val, i128* %addr monotonic, align 16
+  store atomic i128 %val, ptr %addr monotonic, align 16
 
 ; CHECK: stp x2, x3, [x0]
-  store atomic i128 %val, i128* %addr unordered, align 16
+  store atomic i128 %val, ptr %addr unordered, align 16
 
 ; CHECK: dmb ish
 ; CHECK: stp x2, x3, [x0]
-  store atomic i128 %val, i128* %addr release, align 16
+  store atomic i128 %val, ptr %addr release, align 16
 
 ; CHECK: dmb ish
 ; CHECK: stp x2, x3, [x0]
 ; CHECK: dmb ish
-  store atomic i128 %val, i128* %addr seq_cst, align 16
+  store atomic i128 %val, ptr %addr seq_cst, align 16
 
-  %addr8 = bitcast i128* %addr to i8*
 
 ; CHECK: stp x2, x3, [x0, #8]
-  %addr8.1 = getelementptr i8,  i8* %addr8, i32 8
-  %addr128.1 = bitcast i8* %addr8.1 to i128*
-  store atomic i128 %val, i128* %addr128.1 monotonic, align 16
+  %addr8.1 = getelementptr i8,  ptr %addr, i32 8
+  store atomic i128 %val, ptr %addr8.1 monotonic, align 16
 
 ; CHECK: stp x2, x3, [x0, #504]
-  %addr8.2 = getelementptr i8,  i8* %addr8, i32 504
-  %addr128.2 = bitcast i8* %addr8.2 to i128*
-  store atomic i128 %val, i128* %addr128.2 monotonic, align 16
+  %addr8.2 = getelementptr i8,  ptr %addr, i32 504
+  store atomic i128 %val, ptr %addr8.2 monotonic, align 16
 
 ; CHECK: stp x2, x3, [x0, #-512]
-  %addr8.3 = getelementptr i8,  i8* %addr8, i32 -512
-  %addr128.3 = bitcast i8* %addr8.3 to i128*
-  store atomic i128 %val, i128* %addr128.3 monotonic, align 16
+  %addr8.3 = getelementptr i8,  ptr %addr, i32 -512
+  store atomic i128 %val, ptr %addr8.3 monotonic, align 16
 
   ret void
 }
 
-define void @test_libcall_store(i128* %addr, i128 %val) {
+define void @test_libcall_store(ptr %addr, i128 %val) {
 ; CHECK-LABEL: test_libcall_store:
 ; CHECK: bl __atomic_store
-  store atomic i128 %val, i128* %addr unordered, align 8
+  store atomic i128 %val, ptr %addr unordered, align 8
 
   ret void
 }
 
-define void @test_nonfolded_store1(i128* %addr, i128 %val) {
+define void @test_nonfolded_store1(ptr %addr, i128 %val) {
 ; CHECK-LABEL: test_nonfolded_store1:
-  %addr8 = bitcast i128* %addr to i8*
 
 ; CHECK: add x[[ADDR:[0-9]+]], x0, #4
 ; CHECK: stp x2, x3, [x[[ADDR]]]
-  %addr8.1 = getelementptr i8,  i8* %addr8, i32 4
-  %addr128.1 = bitcast i8* %addr8.1 to i128*
-  store atomic i128 %val, i128* %addr128.1 monotonic, align 16
+  %addr8.1 = getelementptr i8,  ptr %addr, i32 4
+  store atomic i128 %val, ptr %addr8.1 monotonic, align 16
 
   ret void
 }
 
-define void @test_nonfolded_store2(i128* %addr, i128 %val) {
+define void @test_nonfolded_store2(ptr %addr, i128 %val) {
 ; CHECK-LABEL: test_nonfolded_store2:
-  %addr8 = bitcast i128* %addr to i8*
 
 ; CHECK: add x[[ADDR:[0-9]+]], x0, #512
 ; CHECK: stp x2, x3, [x[[ADDR]]]
-  %addr8.1 = getelementptr i8,  i8* %addr8, i32 512
-  %addr128.1 = bitcast i8* %addr8.1 to i128*
-  store atomic i128 %val, i128* %addr128.1 monotonic, align 16
+  %addr8.1 = getelementptr i8,  ptr %addr, i32 512
+  store atomic i128 %val, ptr %addr8.1 monotonic, align 16
 
   ret void
 }
 
-define void @test_nonfolded_store3(i128* %addr, i128 %val) {
+define void @test_nonfolded_store3(ptr %addr, i128 %val) {
 ; CHECK-LABEL: test_nonfolded_store3:
-  %addr8 = bitcast i128* %addr to i8*
 
 ; CHECK: sub x[[ADDR:[0-9]+]], x0, #520
 ; CHECK: stp x2, x3, [x[[ADDR]]]
-  %addr8.1 = getelementptr i8,  i8* %addr8, i32 -520
-  %addr128.1 = bitcast i8* %addr8.1 to i128*
-  store atomic i128 %val, i128* %addr128.1 monotonic, align 16
+  %addr8.1 = getelementptr i8,  ptr %addr, i32 -520
+  store atomic i128 %val, ptr %addr8.1 monotonic, align 16
 
   ret void
 }
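
The pattern deleted throughout this file is the classic typed-pointer
addressing idiom: bitcast to i8*, apply a byte-offset getelementptr, bitcast
back to the access type. With opaque pointers both bitcasts vanish and only
the byte GEP survives, which is why each hunk shrinks by two instructions.
Condensed from the first folded-offset load above (offset and ordering as in
the test; a sketch, not commit content):

  ; typed pointers
  %addr8 = bitcast i128* %addr to i8*
  %addr8.1 = getelementptr i8, i8* %addr8, i32 8
  %addr128.1 = bitcast i8* %addr8.1 to i128*
  %res.5 = load atomic i128, i128* %addr128.1 monotonic, align 16

  ; opaque pointers
  %addr8.1 = getelementptr i8, ptr %addr, i32 8
  %res.5 = load atomic i128, ptr %addr8.1 monotonic, align 16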

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/varargs-ios-translator.ll b/llvm/test/CodeGen/AArch64/GlobalISel/varargs-ios-translator.ll
index 86055f0690f6..16a37f8f3d67 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/varargs-ios-translator.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/varargs-ios-translator.ll
@@ -1,6 +1,6 @@
 ; RUN: llc -mtriple=aarch64-apple-ios -stop-after=instruction-select -global-isel -verify-machineinstrs %s -o - | FileCheck %s
 
-define void @test_varargs_sentinel(i8* %list, i64, i64, i64, i64, i64, i64, i64,
+define void @test_varargs_sentinel(ptr %list, i64, i64, i64, i64, i64, i64, i64,
                                    i32, ...) {
 ; CHECK-LABEL: name: test_varargs_sentinel
 ; CHECK: fixedStack:
@@ -9,8 +9,8 @@ define void @test_varargs_sentinel(i8* %list, i64, i64, i64, i64, i64, i64, i64,
 ; CHECK:   [[LIST:%[0-9]+]]:gpr64sp = COPY $x0
 ; CHECK:   [[VARARGS_AREA:%[0-9]+]]:gpr64common = ADDXri %fixed-stack.[[VARARGS_SLOT]], 0, 0
 ; CHECK:   STRXui [[VARARGS_AREA]], [[LIST]], 0 :: (store (s64) into %ir.list, align 1)
-  call void @llvm.va_start(i8* %list)
+  call void @llvm.va_start(ptr %list)
   ret void
 }
 
-declare void @llvm.va_start(i8*)
+declare void @llvm.va_start(ptr)

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/vastart.ll b/llvm/test/CodeGen/AArch64/GlobalISel/vastart.ll
index f83554339542..bd576d0f70e9 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/vastart.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/vastart.ll
@@ -2,12 +2,12 @@
 ; RUN: llc -O0 -stop-after=irtranslator -global-isel -verify-machineinstrs %s -o -  -mtriple=aarch64-linux-gnu | FileCheck --check-prefix=CHECK --check-prefix=CHECK-LINUX %s
 
 
-declare void @llvm.va_start(i8*)
-define void @test_va_start(i8* %list) {
+declare void @llvm.va_start(ptr)
+define void @test_va_start(ptr %list) {
 ; CHECK-LABEL: name: test_va_start
 ; CHECK: [[LIST:%[0-9]+]]:_(p0) = COPY $x0
 ; CHECK-IOS: G_VASTART [[LIST]](p0) :: (store (s64) into %ir.list, align 1)
 ; CHECK-LINUX: G_VASTART [[LIST]](p0) :: (store (s256) into %ir.list, align 1)
-  call void @llvm.va_start(i8* %list)
+  call void @llvm.va_start(ptr %list)
   ret void
 }
